Commit 9255029

shruti2522 authored and johnugeorge committed
updated examples for train API (kubeflow#2077)
Signed-off-by: shruti2522 <[email protected]>
1 parent 926d35d commit 9255029

File tree

examples/pytorch/language-modeling/train_api_hf_dataset.ipynb
examples/pytorch/language-modeling/train_api_s3_dataset.ipynb
examples/pytorch/text-classification/Fine-Tune-BERT-LLM.ipynb

3 files changed: +6 -6 lines changed

examples/pytorch/language-modeling/train_api_hf_dataset.ipynb

+2 -2
@@ -21,7 +21,7 @@
 "from kubeflow.storage_initializer.s3 import S3DatasetParams\n",
 "from kubeflow.storage_initializer.hugging_face import (\n",
 " HuggingFaceModelParams,\n",
-" HuggingFaceTrainParams,\n",
+" HuggingFaceTrainerParams,\n",
 " HfDatasetParams,\n",
 ")\n",
 "from kubeflow.storage_initializer.constants import INIT_CONTAINER_MOUNT_PATH\n",
@@ -71,7 +71,7 @@
 " # it is assumed for text related tasks, you have 'text' column in the dataset.\n",
 " # for more info on how dataset is loaded check load_and_preprocess_data function in sdk/python/kubeflow/trainer/hf_llm_training.py\n",
 " dataset_provider_parameters=HfDatasetParams(repo_id=\"imdatta0/ultrachat_1k\"),\n",
-" train_parameters=HuggingFaceTrainParams(\n",
+" trainer_parameters=HuggingFaceTrainerParams(\n",
 " lora_config=LoraConfig(\n",
 " r=8,\n",
 " lora_alpha=8,\n",

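For context, here is a minimal sketch of what the updated train API call looks like after this rename. It is not taken from the notebook: the job name, model URI, resource requests, and most training arguments are illustrative assumptions; only the dataset repo_id and the LoRA r/lora_alpha values come from the diff above.

from peft import LoraConfig
import transformers
from kubeflow.training.api.training_client import TrainingClient
from kubeflow.storage_initializer.hugging_face import (
    HuggingFaceModelParams,
    HuggingFaceTrainerParams,  # renamed from HuggingFaceTrainParams
    HfDatasetParams,
)

client = TrainingClient()
client.train(
    name="huggingface-train",  # hypothetical job name
    num_workers=1,
    resources_per_worker={"gpu": 1, "cpu": 8, "memory": "16Gi"},  # illustrative sizes
    model_provider_parameters=HuggingFaceModelParams(
        model_uri="hf://<huggingface-model-id>",  # placeholder
        transformer_type=transformers.AutoModelForCausalLM,
    ),
    dataset_provider_parameters=HfDatasetParams(repo_id="imdatta0/ultrachat_1k"),
    trainer_parameters=HuggingFaceTrainerParams(  # keyword renamed from train_parameters
        lora_config=LoraConfig(r=8, lora_alpha=8, lora_dropout=0.1),
        training_parameters=transformers.TrainingArguments(
            output_dir="results",
            per_device_train_batch_size=1,
            num_train_epochs=1,
        ),
    ),
)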
examples/pytorch/language-modeling/train_api_s3_dataset.ipynb

+2 -2
@@ -20,7 +20,7 @@
 "from kubeflow.training.api.training_client import TrainingClient\n",
 "from kubeflow.storage_initializer.hugging_face import (\n",
 " HuggingFaceModelParams,\n",
-" HuggingFaceTrainParams,\n",
+" HuggingFaceTrainerParams,\n",
 " HfDatasetParams,\n",
 ")\n",
 "from kubeflow.storage_initializer.constants import INIT_CONTAINER_MOUNT_PATH\n",
@@ -90,7 +90,7 @@
 " \"secret_key\": s3_secret_key,\n",
 " }\n",
 " ),\n",
-" train_parameters=HuggingFaceTrainParams(\n",
+" trainer_parameters=HuggingFaceTrainerParams(\n",
 " lora_config=LoraConfig(\n",
 " r=8,\n",
 " lora_alpha=8,\n",

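The S3-backed notebook changes in the same way; a condensed sketch of just the arguments touched by this commit follows. The secret_key entry mirrors the diff above; the remaining S3 field names and values are assumptions shown as placeholders.

from peft import LoraConfig
from kubeflow.storage_initializer.s3 import S3DatasetParams
from kubeflow.storage_initializer.hugging_face import HuggingFaceTrainerParams  # renamed

# Only the arguments renamed by this commit are shown; the rest of the train() call is unchanged.
dataset_provider_parameters = S3DatasetParams(
    {
        "endpoint_url": "<s3-endpoint>",  # placeholder
        "bucket_name": "<bucket>",        # placeholder
        "file_key": "<object-key>",       # placeholder
        "access_key": "<s3_access_key>",  # placeholder
        "secret_key": "<s3_secret_key>",  # shown in the diff as s3_secret_key
    }
)
trainer_parameters = HuggingFaceTrainerParams(  # was train_parameters=HuggingFaceTrainParams(...)
    lora_config=LoraConfig(r=8, lora_alpha=8),
)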
examples/pytorch/text-classification/Fine-Tune-BERT-LLM.ipynb

+2 -2
@@ -613,7 +613,7 @@
 "from kubeflow.training import TrainingClient\n",
 "from kubeflow.storage_initializer.hugging_face import (\n",
 " HuggingFaceModelParams,\n",
-" HuggingFaceTrainParams,\n",
+" HuggingFaceTrainerParams,\n",
 " HfDatasetParams,\n",
 ")\n",
 "\n",
@@ -651,7 +651,7 @@
 " split=\"train[:3000]\",\n",
 " ),\n",
 " # Specify HuggingFace Trainer parameters. In this example, we will skip evaluation and model checkpoints.\n",
-" train_parameters=HuggingFaceTrainParams(\n",
+" trainer_parameters=HuggingFaceTrainerParams(\n",
 " training_parameters=transformers.TrainingArguments(\n",
 " output_dir=\"test_trainer\",\n",
 " save_strategy=\"no\",\n",

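The BERT fine-tuning notebook renames the same keyword and class; a short sketch of the renamed argument follows, using the TrainingArguments values visible in the diff. The evaluation setting is an assumption based on the notebook comment about skipping evaluation.

import transformers
from kubeflow.storage_initializer.hugging_face import HuggingFaceTrainerParams  # renamed

# was: train_parameters=HuggingFaceTrainParams(...)
trainer_parameters = HuggingFaceTrainerParams(
    training_parameters=transformers.TrainingArguments(
        output_dir="test_trainer",
        save_strategy="no",        # no model checkpoints, per the notebook comment
        evaluation_strategy="no",  # assumption: evaluation skipped, per the notebook comment
    ),
)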