
Commit 4dfb920

kiraksi and chalmerlowe authored
fix: keyword rendering and docstring improvements (#1829)
* fix: keyword rendering and docstring improvements
* fix error
* small lint fix

Co-authored-by: Chalmer Lowe <[email protected]>
1 parent d62cabb commit 4dfb920

File tree

1 file changed: +84 -90 lines changed


google/cloud/bigquery/client.py

+84 -90
@@ -1716,20 +1716,24 @@ def delete_job_metadata(
             :func:`~google.cloud.bigquery.client.Client.cancel_job` instead.
 
         Args:
-            job_id: Job or job identifier.
-
-        Keyword Arguments:
-            project:
+            job_id (Union[ \
+                str, \
+                LoadJob, \
+                CopyJob, \
+                ExtractJob, \
+                QueryJob \
+            ]): Job or job identifier.
+            project (Optional[str]):
                 ID of the project which owns the job (defaults to the client's project).
-            location:
+            location (Optional[str]):
                 Location where the job was run. Ignored if ``job_id`` is a job
                 object.
-            retry:
+            retry (Optional[google.api_core.retry.Retry]):
                 How to retry the RPC.
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``.
-            not_found_ok:
+            not_found_ok (Optional[bool]):
                 Defaults to ``False``. If ``True``, ignore "not found" errors
                 when deleting the job.
         """
@@ -1970,12 +1974,10 @@ def create_job(
         timeout: TimeoutType = DEFAULT_TIMEOUT,
     ) -> Union[job.LoadJob, job.CopyJob, job.ExtractJob, job.QueryJob]:
         """Create a new job.
+
         Args:
             job_config (dict): configuration job representation returned from the API.
-
-        Keyword Arguments:
-            retry (Optional[google.api_core.retry.Retry]):
-                How to retry the RPC.
+            retry (Optional[google.api_core.retry.Retry]): How to retry the RPC.
             timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``.
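
Since ``create_job`` takes a raw API job representation rather than a job object, a hedged sketch may help; the query configuration shown is illustrative, not taken from the commit:

    from google.cloud import bigquery

    client = bigquery.Client()

    # The dict mirrors the REST API's JobConfiguration resource; here a
    # simple standard-SQL query job. All values are placeholders.
    job = client.create_job(
        job_config={"query": {"query": "SELECT 1", "useLegacySql": False}}
    )
    print(job.job_type, job.state)
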
@@ -2066,10 +2068,14 @@ def get_job(
         https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get
 
         Args:
-            job_id:
+            job_id (Union[ \
+                str, \
+                job.LoadJob, \
+                job.CopyJob, \
+                job.ExtractJob, \
+                job.QueryJob \
+            ]):
                 Job identifier.
-
-        Keyword Arguments:
             project (Optional[str]):
                 ID of the project which owns the job (defaults to the client's project).
             location (Optional[str]):
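
A short usage sketch for ``get_job``, assuming a placeholder job ID; per the docstring, ``location`` is ignored when a job object is passed instead of a string:

    from google.cloud import bigquery

    client = bigquery.Client()

    # Look up an existing job by its identifier (placeholder shown).
    job = client.get_job("bquxjob_123abc", location="US")
    print(job.job_type, job.state)
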
@@ -2137,8 +2143,6 @@ def cancel_job(
                 google.cloud.bigquery.job.ExtractJob, \
                 google.cloud.bigquery.job.QueryJob \
             ]): Job identifier.
-
-        Keyword Arguments:
             project (Optional[str]):
                 ID of the project which owns the job (defaults to the client's project).
             location (Optional[str]):
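
And the companion ``cancel_job`` call, again with a placeholder job ID; cancellation is best-effort, and the returned job reflects the server's latest state:

    from google.cloud import bigquery

    client = bigquery.Client()

    # Ask the server to cancel the job; poll job.state to observe the result.
    job = client.cancel_job("bquxjob_123abc", location="US")
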
@@ -2340,8 +2344,6 @@ def load_table_from_uri(
                 in, this method attempts to create a table reference from a
                 string using
                 :func:`google.cloud.bigquery.table.TableReference.from_string`.
-
-        Keyword Arguments:
             job_id (Optional[str]): Name of the job.
             job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated job ID.
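
For context, a minimal load-from-GCS sketch; the bucket, table ID, and CSV settings are placeholders rather than anything this commit prescribes:

    from google.cloud import bigquery

    client = bigquery.Client()

    job_config = bigquery.LoadJobConfig(
        source_format=bigquery.SourceFormat.CSV,
        skip_leading_rows=1,  # skip the header row
        autodetect=True,      # infer the schema from the data
    )
    load_job = client.load_table_from_uri(
        "gs://my-bucket/data.csv",         # placeholder URI
        "my-project.my_dataset.my_table",  # placeholder table ID
        job_config=job_config,
    )
    load_job.result()  # block until the load completes
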
@@ -2415,39 +2417,42 @@ def load_table_from_file(
         returns a :class:`~google.cloud.bigquery.job.LoadJob`.
 
         Args:
-            file_obj:
+            file_obj (IO[bytes]):
                 A file handle opened in binary mode for reading.
-            destination:
+            destination (Union[Table, \
+                TableReference, \
+                TableListItem, \
+                str \
+            ]):
                 Table into which data is to be loaded. If a string is passed
                 in, this method attempts to create a table reference from a
                 string using
                 :func:`google.cloud.bigquery.table.TableReference.from_string`.
-
-        Keyword Arguments:
-            rewind:
+            rewind (Optional[bool]):
                 If True, seek to the beginning of the file handle before
-                reading the file.
-            size:
+                reading the file. Defaults to False.
+            size (Optional[int]):
                 The number of bytes to read from the file handle. If size is
                 ``None`` or large, resumable upload will be used. Otherwise,
                 multipart upload will be used.
-            num_retries: Number of upload retries. Defaults to 6.
-            job_id: Name of the job.
-            job_id_prefix:
+            num_retries (Optional[int]): Number of upload retries. Defaults to 6.
+            job_id (Optional[str]): Name of the job.
+            job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated job ID.
                 This parameter will be ignored if a ``job_id`` is also given.
-            location:
+            location (Optional[str]):
                 Location where to run the job. Must match the location of the
                 destination table.
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the job. Defaults
                 to the client's project.
-            job_config:
+            job_config (Optional[LoadJobConfig]):
                 Extra configuration options for the job.
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request
                 may be repeated several times using the same timeout each time.
+                Defaults to None.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
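
A sketch of the file-based variant; note the binary-mode handle and the newly documented ``rewind`` default (file and table names are placeholders):

    from google.cloud import bigquery

    client = bigquery.Client()

    job_config = bigquery.LoadJobConfig(
        source_format=bigquery.SourceFormat.CSV,
        skip_leading_rows=1,
        autodetect=True,
    )

    # file_obj must be opened in binary mode; rewind=True seeks back to the
    # start of the handle before reading (it defaults to False).
    with open("data.csv", "rb") as f:
        job = client.load_table_from_file(
            f,
            "my-project.my_dataset.my_table",  # placeholder table ID
            rewind=True,
            job_config=job_config,
        )
    job.result()
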
@@ -2535,9 +2540,13 @@ def load_table_from_dataframe(
         https://github.com/googleapis/python-bigquery/issues/19
 
         Args:
-            dataframe:
+            dataframe (pandas.DataFrame):
                 A :class:`~pandas.DataFrame` containing the data to load.
-            destination:
+            destination (Union[ \
+                Table, \
+                TableReference, \
+                str \
+            ]):
                 The destination table to use for loading the data. If it is an
                 existing table, the schema of the :class:`~pandas.DataFrame`
                 must match the schema of the destination table. If the table
@@ -2547,21 +2556,19 @@ def load_table_from_dataframe(
                 If a string is passed in, this method attempts to create a
                 table reference from a string using
                 :func:`google.cloud.bigquery.table.TableReference.from_string`.
-
-        Keyword Arguments:
-            num_retries: Number of upload retries.
-            job_id: Name of the job.
-            job_id_prefix:
+            num_retries (Optional[int]): Number of upload retries. Defaults to 6.
+            job_id (Optional[str]): Name of the job.
+            job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated
                 job ID. This parameter will be ignored if a ``job_id`` is
                 also given.
-            location:
+            location (Optional[str]):
                 Location where to run the job. Must match the location of the
                 destination table.
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the job. Defaults
                 to the client's project.
-            job_config:
+            job_config (Optional[LoadJobConfig]):
                 Extra configuration options for the job.
 
                 To override the default pandas data type conversions, supply
@@ -2578,9 +2585,10 @@ def load_table_from_dataframe(
                 :attr:`~google.cloud.bigquery.job.SourceFormat.CSV` and
                 :attr:`~google.cloud.bigquery.job.SourceFormat.PARQUET` are
                 supported.
-            parquet_compression:
+            parquet_compression (Optional[str]):
                 [Beta] The compression method to use if intermittently
                 serializing ``dataframe`` to a parquet file.
+                Defaults to "snappy".
 
                 The argument is directly passed as the ``compression``
                 argument to the underlying ``pyarrow.parquet.write_table()``
@@ -2591,10 +2599,11 @@ def load_table_from_dataframe(
                 passed as the ``compression`` argument to the underlying
                 ``DataFrame.to_parquet()`` method.
                 https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_parquet.html#pandas.DataFrame.to_parquet
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request may
                 be repeated several times using the same timeout each time.
+                Defaults to None.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
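
Putting the ``load_table_from_dataframe`` docstring into practice, a hedged sketch; the DataFrame contents and table ID are placeholders, and the schema is inferred from dtypes unless a ``job_config`` supplies one:

    import pandas as pd
    from google.cloud import bigquery

    client = bigquery.Client()

    # A tiny placeholder DataFrame; dtypes drive the inferred BigQuery schema.
    df = pd.DataFrame({"name": ["alpha", "beta"], "value": [1, 2]})
    job = client.load_table_from_dataframe(
        df, "my-project.my_dataset.my_table"  # placeholder table ID
    )
    job.result()
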
@@ -2784,32 +2793,36 @@ def load_table_from_json(
                 client = bigquery.Client()
                 client.load_table_from_file(data_as_file, ...)
 
-            destination:
+            destination (Union[ \
+                Table, \
+                TableReference, \
+                TableListItem, \
+                str \
+            ]):
                 Table into which data is to be loaded. If a string is passed
                 in, this method attempts to create a table reference from a
                 string using
                 :func:`google.cloud.bigquery.table.TableReference.from_string`.
-
-        Keyword Arguments:
-            num_retries: Number of upload retries.
-            job_id: Name of the job.
-            job_id_prefix:
+            num_retries (Optional[int]): Number of upload retries. Defaults to 6.
+            job_id (Optional[str]): Name of the job.
+            job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated job ID.
                 This parameter will be ignored if a ``job_id`` is also given.
-            location:
+            location (Optional[str]):
                 Location where to run the job. Must match the location of the
                 destination table.
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the job. Defaults
                 to the client's project.
-            job_config:
+            job_config (Optional[LoadJobConfig]):
                 Extra configuration options for the job. The ``source_format``
                 setting is always set to
                 :attr:`~google.cloud.bigquery.job.SourceFormat.NEWLINE_DELIMITED_JSON`.
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request may
                 be repeated several times using the same timeout each time.
+                Defaults to None.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
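
Likewise for ``load_table_from_json``, where each mapping becomes one row and ``source_format`` is forced to NEWLINE_DELIMITED_JSON by the method itself; row values and table ID are placeholders:

    from google.cloud import bigquery

    client = bigquery.Client()

    rows = [
        {"name": "alpha", "value": 1},
        {"name": "beta", "value": 2},
    ]
    job = client.load_table_from_json(
        rows, "my-project.my_dataset.my_table"  # placeholder table ID
    )
    job.result()
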
@@ -2885,23 +2898,19 @@ def _do_resumable_upload(
         """Perform a resumable upload.
 
         Args:
-            stream: A bytes IO object open for reading.
-
-            metadata: The metadata associated with the upload.
-
-            num_retries:
+            stream (IO[bytes]): A bytes IO object open for reading.
+            metadata (Mapping[str, str]): The metadata associated with the upload.
+            num_retries (int):
                 Number of upload retries. (Deprecated: This
                 argument will be removed in a future release.)
-
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request may
                 be repeated several times using the same timeout each time.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
-
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the upload. Defaults
                 to the client's project.
 
@@ -2929,23 +2938,19 @@ def _initiate_resumable_upload(
         """Initiate a resumable upload.
 
         Args:
-            stream: A bytes IO object open for reading.
-
-            metadata: The metadata associated with the upload.
-
-            num_retries:
+            stream (IO[bytes]): A bytes IO object open for reading.
+            metadata (Mapping[str, str]): The metadata associated with the upload.
+            num_retries (int):
                 Number of upload retries. (Deprecated: This
                 argument will be removed in a future release.)
-
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request may
                 be repeated several times using the same timeout each time.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
-
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the upload. Defaults
                 to the client's project.
 
@@ -3005,28 +3010,23 @@ def _do_multipart_upload(
         """Perform a multipart upload.
 
         Args:
-            stream: A bytes IO object open for reading.
-
-            metadata: The metadata associated with the upload.
-
-            size:
+            stream (IO[bytes]): A bytes IO object open for reading.
+            metadata (Mapping[str, str]): The metadata associated with the upload.
+            size (int):
                 The number of bytes to be uploaded (which will be read
                 from ``stream``). If not provided, the upload will be
                 concluded once ``stream`` is exhausted (or :data:`None`).
-
-            num_retries:
+            num_retries (int):
                 Number of upload retries. (Deprecated: This
                 argument will be removed in a future release.)
-
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request may
                 be repeated several times using the same timeout each time.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
-
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the upload. Defaults
                 to the client's project.
 
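
The three upload helpers above are private; callers reach them through the public ``load_table_from_*`` methods, which also accept the tuple form of ``timeout`` their docstrings keep mentioning. A hedged sketch with placeholder values:

    from google.cloud import bigquery

    client = bigquery.Client()

    # timeout may be a float or a (connect_timeout, read_timeout) tuple,
    # as the docstrings note; both values here are placeholders.
    job = client.load_table_from_json(
        [{"name": "alpha", "value": 1}],
        "my-project.my_dataset.my_table",
        timeout=(5.0, 60.0),
    )
    job.result()
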
@@ -3118,8 +3118,6 @@ def copy_table(
                 str, \
             ]):
                 Table into which data is to be copied.
-
-        Keyword Arguments:
             job_id (Optional[str]): The ID of the job.
             job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated job ID.
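
For ``copy_table``, a one-call sketch; both table IDs are placeholders, and ``sources`` may also be a list of tables:

    from google.cloud import bigquery

    client = bigquery.Client()

    job = client.copy_table(
        "my-project.my_dataset.source_table",       # placeholder source
        "my-project.my_dataset.destination_table",  # placeholder destination
    )
    job.result()
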
@@ -3216,8 +3214,6 @@ def extract_table(
                 URIs of Cloud Storage file(s) into which table data is to be
                 extracted; in format
                 ``gs://<bucket_name>/<object_name_or_glob>``.
-
-        Keyword Arguments:
             job_id (Optional[str]): The ID of the job.
             job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated job ID.
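
A matching ``extract_table`` sketch; the glob in the destination URI shards large exports across multiple files (bucket and table IDs are placeholders):

    from google.cloud import bigquery

    client = bigquery.Client()

    job = client.extract_table(
        "my-project.my_dataset.my_table",     # placeholder table ID
        "gs://my-bucket/exports/part-*.csv",  # placeholder URI with a glob
    )
    job.result()
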
@@ -3306,8 +3302,6 @@ def query(
             query (str):
                 SQL query to be executed. Defaults to the standard SQL
                 dialect. Use the ``job_config`` parameter to change dialects.
-
-        Keyword Arguments:
             job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
                 Extra configuration options for the job.
                 To override any options that were previously set in
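
Finally, a parameterized ``query`` sketch using ``QueryJobConfig``; the SQL, table ID, and parameter values are placeholders:

    from google.cloud import bigquery

    client = bigquery.Client()

    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.ScalarQueryParameter("min_value", "INT64", 10),
        ]
    )
    query_job = client.query(
        "SELECT name, value FROM `my-project.my_dataset.my_table` "
        "WHERE value >= @min_value",
        job_config=job_config,
    )
    for row in query_job.result():
        print(row.name, row.value)
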
