Commit 6995e54

chore: fix typos (#760)
1 parent: bb60e96

13 files changed, +25 −25 lines changed

README.rst (+2 −2)

@@ -111,7 +111,7 @@ Initialize the SDK to store common configurations that you use with the SDK.
     staging_bucket='gs://my_staging_bucket',

     # custom google.auth.credentials.Credentials
-    # environment default creds used if not set
+    # environment default credentials used if not set
     credentials=my_credentials,

     # customer managed encryption key resource name
@@ -188,7 +188,7 @@ Please visit `Using a managed dataset in a custom training application`_ for a d

 .. _Using a managed dataset in a custom training application: https://cloud.google.com/vertex-ai/docs/training/using-managed-datasets

-It must write the model artifact to the environment variable populated by the traing service:
+It must write the model artifact to the environment variable populated by the training service:

 .. code-block:: Python

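The README snippet patched here is the `aiplatform.init` example. A minimal runnable sketch of that call, assuming a hypothetical service-account key file and bucket:

```python
from google.cloud import aiplatform
from google.oauth2 import service_account

# Hypothetical key file; any google.auth.credentials.Credentials instance works.
my_credentials = service_account.Credentials.from_service_account_file(
    "my-sa-key.json"
)

aiplatform.init(
    project="my-project",
    location="us-central1",
    staging_bucket="gs://my_staging_bucket",
    # environment default credentials used if not set
    credentials=my_credentials,
)
```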
google/cloud/aiplatform/base.py (+5 −5)

@@ -98,7 +98,7 @@ def log_create_complete(
         cls (VertexAiResourceNoun):
             Vertex AI Resource Noun class that is being created.
         resource (proto.Message):
-            Vertex AI Resourc proto.Message
+            Vertex AI Resource proto.Message
         variable_name (str): Name of variable to use for code snippet
     """
     self._logger.info(f"{cls.__name__} created. Resource name: {resource.name}")
@@ -121,7 +121,7 @@ def log_create_complete_with_getter(
         cls (VertexAiResourceNoun):
             Vertex AI Resource Noun class that is being created.
         resource (proto.Message):
-            Vertex AI Resourc proto.Message
+            Vertex AI Resource proto.Message
         variable_name (str): Name of variable to use for code snippet
     """
     self._logger.info(f"{cls.__name__} created. Resource name: {resource.name}")
@@ -462,7 +462,7 @@ def __init__(
     Args:
         project(str): Project of the resource noun.
         location(str): The location of the resource noun.
-        credentials(google.auth.crendentials.Crendentials): Optional custom
+        credentials(google.auth.credentials.Credentials): Optional custom
             credentials to use when accessing interacting with resource noun.
         resource_name(str): A fully-qualified resource name or ID.
     """
@@ -842,7 +842,7 @@ def __init__(
     Args:
         project (str): Optional. Project of the resource noun.
         location (str): Optional. The location of the resource noun.
-        credentials(google.auth.crendentials.Crendentials):
+        credentials(google.auth.credentials.Credentials):
             Optional. custom credentials to use when accessing interacting with
             resource noun.
         resource_name(str): A fully-qualified resource name or ID.
@@ -872,7 +872,7 @@ def _empty_constructor(
     Args:
         project (str): Optional. Project of the resource noun.
         location (str): Optional. The location of the resource noun.
-        credentials(google.auth.crendentials.Crendentials):
+        credentials(google.auth.credentials.Credentials):
             Optional. custom credentials to use when accessing interacting with
             resource noun.
         resource_name(str): A fully-qualified resource name or ID.

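Each corrected docstring in base.py describes the same optional project/location/credentials trio that Vertex AI resource nouns accept. A hedged sketch of passing custom credentials to one such resource; the model resource name and key file are hypothetical placeholders:

```python
from google.cloud import aiplatform
from google.oauth2 import service_account

creds = service_account.Credentials.from_service_account_file("my-sa-key.json")

# Environment default credentials are used if `credentials` is omitted.
model = aiplatform.Model(
    "projects/my-project/locations/us-central1/models/1234567890",
    credentials=creds,
)
```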
google/cloud/aiplatform/datasets/dataset.py (+2 −2)

@@ -164,7 +164,7 @@ def create(
             be picked randomly. Two DataItems are considered identical
             if their content bytes are identical (e.g. image bytes or
             pdf bytes). These labels will be overridden by Annotation
-            labels specified inside index file refenced by
+            labels specified inside index file referenced by
             ``import_schema_uri``,
             e.g. jsonl file.
         project (str):
@@ -488,7 +488,7 @@ def import_data(
             be picked randomly. Two DataItems are considered identical
             if their content bytes are identical (e.g. image bytes or
             pdf bytes). These labels will be overridden by Annotation
-            labels specified inside index file refenced by
+            labels specified inside index file referenced by
             ``import_schema_uri``,
             e.g. jsonl file.
         sync (bool):

google/cloud/aiplatform/datasets/image_dataset.py (+1 −1)

@@ -82,7 +82,7 @@ def create(
             be picked randomly. Two DataItems are considered identical
             if their content bytes are identical (e.g. image bytes or
             pdf bytes). These labels will be overridden by Annotation
-            labels specified inside index file refenced by
+            labels specified inside index file referenced by
             ``import_schema_uri``,
             e.g. jsonl file.
         project (str):

google/cloud/aiplatform/datasets/text_dataset.py (+1 −1)

@@ -89,7 +89,7 @@ def create(
             be picked randomly. Two DataItems are considered identical
             if their content bytes are identical (e.g. image bytes or
             pdf bytes). These labels will be overridden by Annotation
-            labels specified inside index file refenced by
+            labels specified inside index file referenced by
             ``import_schema_uri``,
             e.g. jsonl file.
         project (str):

google/cloud/aiplatform/datasets/video_dataset.py (+1 −1)

@@ -82,7 +82,7 @@ def create(
             be picked randomly. Two DataItems are considered identical
             if their content bytes are identical (e.g. image bytes or
             pdf bytes). These labels will be overridden by Annotation
-            labels specified inside index file refenced by
+            labels specified inside index file referenced by
             ``import_schema_uri``,
             e.g. jsonl file.
         project (str):

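The `data_item_labels` parameter whose docstring is fixed in these four dataset files behaves the same everywhere. A hedged sketch of a dataset import that uses it; the bucket path, display name, and labels are hypothetical. Per the docstring, Annotation labels in the JSONL index file referenced by `import_schema_uri` override these labels:

```python
from google.cloud import aiplatform

ds = aiplatform.ImageDataset.create(
    display_name="my-flowers",
    gcs_source="gs://my-bucket/index.jsonl",
    import_schema_uri=aiplatform.schema.dataset.ioformat.image.single_label_classification,
    # Applied to every imported DataItem unless overridden by the index file.
    data_item_labels={"source": "field-survey"},
)
```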
google/cloud/aiplatform/jobs.py (+5 −5)

@@ -325,7 +325,7 @@ def output_info(self,) -> Optional[aiplatform.gapic.BatchPredictionJob.OutputInf
     """Information describing the output of this job, including output location
     into which prediction output is written.

-    This is only available for batch predicition jobs that have run successfully.
+    This is only available for batch prediction jobs that have run successfully.
     """
     self._assert_gca_resource_is_available()
     return self._gca_resource.output_info
@@ -839,7 +839,7 @@ def __init__(
     Args:
         project(str): Project of the resource noun.
         location(str): The location of the resource noun.
-        credentials(google.auth.crendentials.Crendentials): Optional custom
+        credentials(google.auth.credentials.Credentials): Optional custom
             credentials to use when accessing interacting with resource noun.
     """

@@ -1023,7 +1023,7 @@ def __init__(
     encryption_spec_key_name: Optional[str] = None,
     staging_bucket: Optional[str] = None,
 ):
-    """Cosntruct a Custom Job with Worker Pool Specs.
+    """Constructs a Custom Job with Worker Pool Specs.

     ```
     Example usage:
@@ -1569,7 +1569,7 @@ def __init__(
         Required. Configured CustomJob. The worker pool spec from this custom job
         applies to the CustomJobs created in all the trials.
     metric_spec: Dict[str, str]
-        Required. Dicionary representing metrics to optimize. The dictionary key is the metric_id,
+        Required. Dictionary representing metrics to optimize. The dictionary key is the metric_id,
         which is reported by your training job, and the dictionary value is the
         optimization goal of the metric('minimize' or 'maximize'). example:

@@ -1594,7 +1594,7 @@ def __init__(
     DoubleParameterSpec, IntegerParameterSpec, CategoricalParameterSpace, DiscreteParameterSpec

     max_trial_count (int):
-        Reuired. The desired total number of Trials.
+        Required. The desired total number of Trials.
     parallel_trial_count (int):
         Required. The desired number of Trials to run in parallel.
     max_failed_trial_count (int):

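A hedged sketch tying the corrected jobs.py docstrings together: a CustomJob built from worker pool specs, then a hyperparameter tuning job whose `metric_spec` maps a metric_id reported by the training code to 'minimize' or 'maximize'. All display names, the container image, and the bucket path are hypothetical:

```python
from google.cloud import aiplatform
from google.cloud.aiplatform import hyperparameter_tuning as hpt

# One worker pool; the trainer image is a hypothetical placeholder.
worker_pool_specs = [{
    "machine_spec": {"machine_type": "n1-standard-4"},
    "replica_count": 1,
    "container_spec": {"image_uri": "gcr.io/my-project/my-trainer:latest"},
}]

custom_job = aiplatform.CustomJob(
    display_name="my-custom-job",
    worker_pool_specs=worker_pool_specs,
    staging_bucket="gs://my_staging_bucket",
)

hpt_job = aiplatform.HyperparameterTuningJob(
    display_name="my-hpt-job",
    custom_job=custom_job,
    metric_spec={"loss": "minimize"},  # metric_id -> optimization goal
    parameter_spec={
        "learning_rate": hpt.DoubleParameterSpec(min=1e-4, max=1e-1, scale="log"),
    },
    max_trial_count=10,
    parallel_trial_count=2,
)
hpt_job.run()  # submits the tuning job
```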
google/cloud/aiplatform/metadata/metadata.py (+2 −2)

@@ -155,7 +155,7 @@ def log_metrics(self, metrics: Dict[str, Union[float, int]]):

     Args:
         metrics (Dict):
-            Required. Metrics key/value pairs. Only flot and int are supported format for value.
+            Required. Metrics key/value pairs. Only float and int are supported format for value.
     Raises:
         TypeError: If value contains unsupported types.
         ValueError: If Experiment or Run is not set.
@@ -263,7 +263,7 @@ def _validate_metrics_value_type(metrics: Dict[str, Union[float, int]]):

     Args:
         metrics (Dict):
-            Required. Metrics key/value pairs. Only flot and int are supported format for value.
+            Required. Metrics key/value pairs. Only float and int are supported format for value.
     Raises:
         TypeError: If value contains unsupported types.
     """

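As the corrected docstring says, `log_metrics` accepts only float and int values, and raises ValueError unless an experiment and run are set first. A minimal sketch, with hypothetical experiment and run names:

```python
from google.cloud import aiplatform

aiplatform.init(
    project="my-project", location="us-central1", experiment="my-experiment"
)
aiplatform.start_run("run-1")
# Values must be float or int; other types raise TypeError.
aiplatform.log_metrics({"accuracy": 0.95, "epochs": 10})
```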
google/cloud/aiplatform/tensorboard/uploader_utils.py (+1 −1)

@@ -377,7 +377,7 @@ def get_or_create(

     Returns:
         time_series (tensorboard_time_series.TensorboardTimeSeries):
-            A new or existing tensorboard_time_series.TensorbaordTimeSeries.
+            A new or existing tensorboard_time_series.TensorboardTimeSeries.

     Raises:
         exceptions.InvalidArgument:

google/cloud/aiplatform/training_jobs.py (+2 −2)

@@ -435,7 +435,7 @@ def _create_input_data_config(
         - AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test"
     Raises:
         ValueError: When more than 1 type of split configuration is passed or when
-            the split configuartion passed is incompatible with the dataset schema.
+            the split configuration passed is incompatible with the dataset schema.
     """

     input_data_config = None
@@ -5811,7 +5811,7 @@ def __init__(
         multiple objects in shots and segments. You can use these
         models to track objects in your videos according to your
         own pre-defined, custom labels.
-    "action_recognition" - A video action reconition model pinpoints
+    "action_recognition" - A video action recognition model pinpoints
        the location of actions with short temporal durations (~1 second).
     model_type: str = "CLOUD"
         Required. One of the following:

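A hedged sketch of the AutoML video job the second corrected docstring describes, using the "action_recognition" prediction type. The dataset resource name and display name are hypothetical, and only one kind of split configuration (fractions) is passed, since mixing split schemes raises the ValueError documented above:

```python
from google.cloud import aiplatform

job = aiplatform.AutoMLVideoTrainingJob(
    display_name="my-action-model",
    prediction_type="action_recognition",
    model_type="CLOUD",
)

ds = aiplatform.VideoDataset(
    "projects/my-project/locations/us-central1/datasets/1234567890"
)
model = job.run(
    dataset=ds,
    # Exactly one split scheme: fraction-based only.
    training_fraction_split=0.8,
    test_fraction_split=0.2,
)
```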
google/cloud/aiplatform/utils/pipeline_utils.py (+1 −1)

@@ -155,7 +155,7 @@ def _get_vertex_value(
         inputs, or value is none.
     """
     if value is None:
-        raise ValueError("None values should be filterd out.")
+        raise ValueError("None values should be filtered out.")

     if name not in self._parameter_types:
         raise ValueError(

google/cloud/aiplatform/utils/source_utils.py (+1 −1)

@@ -71,7 +71,7 @@ class _TrainingScriptPythonPackager:
     packager = TrainingScriptPythonPackager('my_script.py', ['pandas', 'pytorch'])
     gcs_path = packager.package_and_copy_to_gcs(
         gcs_staging_dir='my-bucket',
-        project='my-prject')
+        project='my-project')
     module_name = packager.module_name

 The package after installed can be executed as:

google/cloud/aiplatform/utils/worker_spec_utils.py (+1 −1)

@@ -186,7 +186,7 @@ def chief_worker_pool(
     reduction_server_replica_count: int = 0,
     reduction_server_machine_type: str = None,
 ) -> "_DistributedTrainingSpec":
-    """Parameterizes Config to support only chief with worker replicas.
+    """Parametrizes Config to support only chief with worker replicas.

     For replica is assigned to chief and the remainder to workers. All spec have the
     same machine type, accelerator count, and accelerator type.
