HexYang committed on
Commit 64bd4a3 · verified · 1 Parent(s): 8ad17e9

Upload run_roberta_train_dataset.py

Files changed (1)
  1. run_roberta_train_dataset.py +503 -0
run_roberta_train_dataset.py ADDED
@@ -0,0 +1,503 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Team All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) on a text file or a dataset.
+
+ Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
+ https://huggingface.co/models?filter=fill-mask
+ """
+ # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments.
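+ #
+ # Example invocation (illustrative only; the paths and hyperparameters below are
+ # placeholders, not values shipped with this commit):
+ #
+ #   python run_roberta_train_dataset.py \
+ #       --config_name ./roberta_config \
+ #       --tokenizer_name ./dna_tokenizer \
+ #       --dataset_name ./tokenized_dataset \
+ #       --do_train --do_eval \
+ #       --per_device_train_batch_size 32 \
+ #       --output_dir ./output
+ #
+ # `--dataset_name` must point to a directory written by `datasets`' `save_to_disk`,
+ # because the script loads it with `datasets.load_from_disk` (see `main()` below).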
+
+ import logging
+ import math
+ import os
+ import sys
+ import warnings
+ from dataclasses import dataclass, field
+ from typing import Optional
+
+ import datasets
+ import evaluate
+ import torch
+ from datasets import DatasetDict
+
+ import transformers
+ from transformers import (
+     CONFIG_MAPPING,
+     MODEL_FOR_MASKED_LM_MAPPING,
+     AutoConfig,
+     AutoModelForMaskedLM,
+     DataCollatorForLanguageModeling,
+     HfArgumentParser,
+     Trainer,
+     TrainingArguments,
+     is_torch_xla_available,
+     set_seed,
+ )
+ from transformers.trainer_utils import get_last_checkpoint
+ from transformers.utils.versions import require_version
+
+ # Make the parent directory importable so the local `dna_tokenizer_fast` module can be found.
+ sys.path.append(os.path.abspath(os.path.dirname(__file__) + "/.."))
+
+ from dna_tokenizer_fast import DNATokenizerFast
+
+ # Add the local "accuracy" directory to the import path as well.
+ sys.path.append(os.path.join(os.path.dirname(__file__), "accuracy"))
+
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
+ # check_min_version("4.40.0.dev0")
+
+ require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
+
+ logger = logging.getLogger(__name__)
+ MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
+     """
+
+     model_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
+             )
+         },
+     )
+     model_type: Optional[str] = field(
+         default=None,
+         metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
+     )
+     config_overrides: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "Override some existing default config settings when a model is trained from scratch. Example: "
+                 "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
+             )
+         },
+     )
+     config_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
+     )
+     tokenizer_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
+     )
+     cache_dir: Optional[str] = field(
+         default=None,
+         metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
+     )
+     use_fast_tokenizer: bool = field(
+         default=True,
+         metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
+     )
+     model_revision: str = field(
+         default="main",
+         metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
+     )
+     token: str = field(
+         default=None,
+         metadata={
+             "help": (
+                 "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
+                 "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
+             )
+         },
+     )
+     use_auth_token: bool = field(
+         default=None,
+         metadata={
+             "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead."
+         },
+     )
+     trust_remote_code: bool = field(
+         default=False,
+         metadata={
+             "help": (
+                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                 "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                 "execute code present on the Hub on your local machine."
+             )
+         },
+     )
+     torch_dtype: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
+                 "dtype will be automatically derived from the model's weights."
+             ),
+             "choices": ["auto", "bfloat16", "float16", "float32"],
+         },
+     )
+     low_cpu_mem_usage: bool = field(
+         default=False,
+         metadata={
+             "help": (
+                 "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. "
+                 "set True will benefit LLM loading time and RAM consumption."
+             )
+         },
+     )
+
+     def __post_init__(self):
+         if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
+             raise ValueError(
+                 "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
+             )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+     """
+
+     dataset_name: Optional[str] = field(
+         default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: Optional[str] = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
+     validation_file: Optional[str] = field(
+         default=None,
+         metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
+     )
+     validation_split_percentage: Optional[int] = field(
+         default=5,
+         metadata={
+             "help": "The percentage of the train set used as validation set in case there's no validation split"
+         },
+     )
+     max_seq_length: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "The maximum total input sequence length after tokenization. Sequences longer "
+                 "than this will be truncated."
+             )
+         },
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+     mlm_probability: float = field(
+         default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
+     )
+     line_by_line: bool = field(
+         default=False,
+         metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
+     )
+     pad_to_max_length: bool = field(
+         default=False,
+         metadata={
+             "help": (
+                 "Whether to pad all samples to `max_seq_length`. "
+                 "If False, will pad the samples dynamically when batching to the maximum length in the batch."
+             )
+         },
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "For debugging purposes or quicker training, truncate the number of training examples to this "
+                 "value if set."
+             )
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": (
+                 "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
+                 "value if set."
+             )
+         },
+     )
+     streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
+
+ def main():
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     if model_args.use_auth_token is not None:
+         warnings.warn(
+             "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.",
+             FutureWarning,
+         )
+         if model_args.token is not None:
+             raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
+         model_args.token = model_args.use_auth_token
+
+     # Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+
+     if training_args.should_log:
+         # The default of training_args.log_level is passive, so we set log level at info here to have that default.
+         transformers.utils.logging.set_verbosity_info()
+
+     log_level = training_args.get_process_log_level()
+     logger.setLevel(log_level)
+     datasets.utils.logging.set_verbosity(log_level)
+     transformers.utils.logging.set_verbosity(log_level)
+     transformers.utils.logging.enable_default_handler()
+     transformers.utils.logging.enable_explicit_format()
+
+     # Log on each process the small summary:
+     logger.warning(
+         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
+     )
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     logger.info(f"Training/evaluation parameters {training_args}")
+
+     # logger.info(f"xiao_print change output_dir, {str(datetime.datetime.now())[:-3]}")
+     # import time
+     # training_args.output_dir = training_args.output_dir + str(time.time())[-4:]
+
+     # Detecting last checkpoint.
+     last_checkpoint = None
+     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+         last_checkpoint = get_last_checkpoint(training_args.output_dir)
+         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+             raise ValueError(
+                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                 "Use --overwrite_output_dir to overcome."
+             )
+         elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+             logger.info(
+                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+             )
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
+     # https://huggingface.co/docs/datasets/loading_datasets.
+
+     # Load pretrained model and tokenizer
+     #
+     # Distributed training:
+     # The .from_pretrained methods guarantee that only one local process can concurrently
+     # download model & vocab.
+     config_kwargs = {
+         "cache_dir": model_args.cache_dir,
+         "revision": model_args.model_revision,
+         "token": model_args.token,
+         "trust_remote_code": model_args.trust_remote_code,
+     }
+     if model_args.config_name:
+         config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
+     elif model_args.model_name_or_path:
+         config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
+     else:
+         config = CONFIG_MAPPING[model_args.model_type]()
+         logger.warning("You are instantiating a new config instance from scratch.")
+         if model_args.config_overrides is not None:
+             logger.info(f"Overriding config: {model_args.config_overrides}")
+             config.update_from_string(model_args.config_overrides)
+             logger.info(f"New config: {config}")
+
+     tokenizer_kwargs = {
+         "cache_dir": model_args.cache_dir,
+         "use_fast": model_args.use_fast_tokenizer,
+         "revision": model_args.model_revision,
+         "token": model_args.token,
+         "trust_remote_code": model_args.trust_remote_code,
+     }
+     if model_args.tokenizer_name:
+         tokenizer = DNATokenizerFast.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
+     elif model_args.model_name_or_path:
+         tokenizer = DNATokenizerFast.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
+     else:
+         raise ValueError(
+             "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
+             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
+         )
+
+     if model_args.model_name_or_path:
+         torch_dtype = (
+             model_args.torch_dtype
+             if model_args.torch_dtype in ["auto", None]
+             else getattr(torch, model_args.torch_dtype)
+         )
+         model = AutoModelForMaskedLM.from_pretrained(
+             model_args.model_name_or_path,
+             from_tf=bool(".ckpt" in model_args.model_name_or_path),
+             config=config,
+             cache_dir=model_args.cache_dir,
+             revision=model_args.model_revision,
+             token=model_args.token,
+             trust_remote_code=model_args.trust_remote_code,
+             torch_dtype=torch_dtype,
+             low_cpu_mem_usage=model_args.low_cpu_mem_usage,
+         )
+     else:
+         logger.info("Training new model from scratch")
+         model = AutoModelForMaskedLM.from_config(config, trust_remote_code=model_args.trust_remote_code)
+
+     # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
+     # on a small vocab and want a smaller embedding size, remove this test.
+     embedding_size = model.get_input_embeddings().weight.shape[0]
+     if len(tokenizer) > embedding_size:
+         model.resize_token_embeddings(len(tokenizer))
+
+     # `--dataset_name` is expected to be a path to a dataset that was already tokenized
+     # and saved with `save_to_disk`; this script does not tokenize raw text itself.
+     origin_datasets = datasets.load_from_disk(data_args.dataset_name)
+
+     # If the saved dataset already contains a validation split, keep it; otherwise carve
+     # one out of the training data according to `--validation_split_percentage`.
+     data_args.validation_split_percentage = (
+         None
+         if (isinstance(origin_datasets, DatasetDict) and "validation" in origin_datasets.keys())
+         else data_args.validation_split_percentage
+     )
+     if data_args.validation_split_percentage is not None and data_args.validation_split_percentage > 0:
+         split_ratio = data_args.validation_split_percentage / 100
+         tokenized_datasets = origin_datasets.train_test_split(split_ratio)
+         tokenized_datasets["validation"] = tokenized_datasets["test"]
+     else:
+         tokenized_datasets = origin_datasets
+
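+     # A minimal sketch of how such an on-disk dataset might be prepared offline (an
+     # assumed workflow, not part of this script; paths and column names are placeholders):
+     #
+     #     from datasets import load_dataset
+     #     raw = load_dataset("text", data_files={"train": "sequences.txt"})["train"]
+     #     tok = DNATokenizerFast.from_pretrained("./dna_tokenizer")
+     #     tokenized = raw.map(
+     #         lambda batch: tok(batch["text"], truncation=True, max_length=512),
+     #         batched=True,
+     #         remove_columns=["text"],
+     #     )
+     #     tokenized.save_to_disk("./tokenized_dataset")
+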
+     if training_args.do_train:
+         if "train" not in tokenized_datasets:
+             raise ValueError("--do_train requires a train dataset")
+         train_dataset = tokenized_datasets["train"]
+         if data_args.max_train_samples is not None:
+             max_train_samples = min(len(train_dataset), data_args.max_train_samples)
+             train_dataset = train_dataset.select(range(max_train_samples))
+
+     if training_args.do_eval:
+         if "validation" not in tokenized_datasets:
+             raise ValueError("--do_eval requires a validation dataset")
+         eval_dataset = tokenized_datasets["validation"]
+         if data_args.max_eval_samples is not None:
+             max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
+             eval_dataset = eval_dataset.select(range(max_eval_samples))
+
+         def preprocess_logits_for_metrics(logits, labels):
+             if isinstance(logits, tuple):
+                 # Depending on the model and config, logits may contain extra tensors,
+                 # like past_key_values, but logits always come first
+                 logits = logits[0]
+             return logits.argmax(dim=-1)
+
+         metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
+
+         def compute_metrics(eval_preds):
+             preds, labels = eval_preds
+             # preds have the same shape as the labels, after the argmax(-1) has been calculated
+             # by preprocess_logits_for_metrics
+             labels = labels.reshape(-1)
+             preds = preds.reshape(-1)
+             mask = labels != -100
+             labels = labels[mask]
+             preds = preds[mask]
+             return metric.compute(predictions=preds, references=labels)
+
+     # Data collator
+     # This one will take care of randomly masking the tokens.
+     pad_to_multiple_of_8 = data_args.line_by_line and training_args.fp16 and not data_args.pad_to_max_length
+     data_collator = DataCollatorForLanguageModeling(
+         tokenizer=tokenizer,
+         mlm_probability=data_args.mlm_probability,
+         pad_to_multiple_of=8 if pad_to_multiple_of_8 else None,
+     )
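+     # Note on the collator: with its default `mlm=True`, it derives `labels` from
+     # `input_ids`, selects roughly `mlm_probability` of the tokens, replaces 80% of the
+     # selected tokens with the mask token and 10% with random tokens (10% are left
+     # unchanged), and sets all non-selected label positions to -100 so they are ignored
+     # by the masked-LM loss.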
+
+     # Initialize our Trainer
+     trainer = Trainer(
+         model=model,
+         args=training_args,
+         train_dataset=train_dataset if training_args.do_train else None,
+         eval_dataset=eval_dataset if training_args.do_eval else None,
+         tokenizer=tokenizer,
+         data_collator=data_collator,
+         compute_metrics=compute_metrics if training_args.do_eval and not is_torch_xla_available() else None,
+         preprocess_logits_for_metrics=preprocess_logits_for_metrics
+         if training_args.do_eval and not is_torch_xla_available()
+         else None,
+     )
+
+     # Training
+     if training_args.do_train:
+         checkpoint = None
+         if training_args.resume_from_checkpoint is not None:
+             checkpoint = training_args.resume_from_checkpoint
+         elif last_checkpoint is not None:
+             checkpoint = last_checkpoint
+         train_result = trainer.train(resume_from_checkpoint=checkpoint)
+         trainer.save_model()  # Saves the tokenizer too for easy upload
+         metrics = train_result.metrics
+
+         max_train_samples = (
+             data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
+         )
+         metrics["train_samples"] = min(max_train_samples, len(train_dataset))
+
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+
+     # Evaluation
+     if training_args.do_eval:
+         logger.info("*** Evaluate ***")
+
+         metrics = trainer.evaluate()
+
+         max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
+         metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
+         try:
+             perplexity = math.exp(metrics["eval_loss"])
+         except OverflowError:
+             perplexity = float("inf")
+         metrics["perplexity"] = perplexity
+
+         trainer.log_metrics("eval", metrics)
+         trainer.save_metrics("eval", metrics)
+
+     # kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"}
+     # if data_args.dataset_name is not None:
+     #     kwargs["dataset_tags"] = data_args.dataset_name
+     #     if data_args.dataset_config_name is not None:
+     #         kwargs["dataset_args"] = data_args.dataset_config_name
+     #         kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+     #     else:
+     #         kwargs["dataset"] = data_args.dataset_name
+
+     # if training_args.push_to_hub:
+     #     trainer.push_to_hub(**kwargs)
+     # else:
+     #     trainer.create_model_card(**kwargs)
+
+
+ if __name__ == "__main__":
+     main()