@@ -1716,20 +1716,24 @@ def delete_job_metadata(
         :func:`~google.cloud.bigquery.client.Client.cancel_job` instead.
 
         Args:
-            job_id: Job or job identifier.
-
-        Keyword Arguments:
-            project:
+            job_id (Union[ \
+                str, \
+                LoadJob, \
+                CopyJob, \
+                ExtractJob, \
+                QueryJob \
+            ]): Job or job identifier.
+            project (Optional[str]):
                 ID of the project which owns the job (defaults to the client's project).
-            location:
+            location (Optional[str]):
                 Location where the job was run. Ignored if ``job_id`` is a job
                 object.
-            retry:
+            retry (Optional[google.api_core.retry.Retry]):
                 How to retry the RPC.
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``.
-            not_found_ok:
+            not_found_ok (Optional[bool]):
                 Defaults to ``False``. If ``True``, ignore "not found" errors
                 when deleting the job.
         """
@@ -1970,12 +1974,10 @@ def create_job(
         timeout: TimeoutType = DEFAULT_TIMEOUT,
     ) -> Union[job.LoadJob, job.CopyJob, job.ExtractJob, job.QueryJob]:
         """Create a new job.
+
         Args:
             job_config (dict): configuration job representation returned from the API.
-
-        Keyword Arguments:
-            retry (Optional[google.api_core.retry.Retry]):
-                How to retry the RPC.
+            retry (Optional[google.api_core.retry.Retry]): How to retry the RPC.
             timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``.
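
Note: a sketch of calling this with an API-format configuration dict, reusing the ``client`` from the earlier sketch; the field values are hypothetical.

    # The dict mirrors the REST API's job ``configuration`` resource.
    job_config = {
        "query": {
            "query": "SELECT 1",
            "useLegacySql": False,
        }
    }
    job = client.create_job(job_config=job_config)  # returns a QueryJob here
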
@@ -2066,10 +2068,14 @@ def get_job(
             https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get
 
         Args:
-            job_id:
+            job_id (Union[ \
+                str, \
+                job.LoadJob, \
+                job.CopyJob, \
+                job.ExtractJob, \
+                job.QueryJob \
+            ]):
                 Job identifier.
-
-        Keyword Arguments:
             project (Optional[str]):
                 ID of the project which owns the job (defaults to the client's project).
             location (Optional[str]):
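
Note: a quick sketch of fetching job metadata by ID; the identifier and location are hypothetical.

    job = client.get_job("my-job-id", location="US")
    print(job.job_type, job.state)
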
@@ -2137,8 +2143,6 @@ def cancel_job(
                 google.cloud.bigquery.job.ExtractJob, \
                 google.cloud.bigquery.job.QueryJob \
             ]): Job identifier.
-
-        Keyword Arguments:
             project (Optional[str]):
                 ID of the project which owns the job (defaults to the client's project).
             location (Optional[str]):
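
Note: the corresponding cancel call, which the ``delete_job_metadata`` deprecation note points to; the job ID is hypothetical.

    job = client.cancel_job("my-running-job-id", location="US")
    # Cancellation is asynchronous; poll the returned job to observe its state.
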
@@ -2340,8 +2344,6 @@ def load_table_from_uri(
                 in, this method attempts to create a table reference from a
                 string using
                 :func:`google.cloud.bigquery.table.TableReference.from_string`.
-
-        Keyword Arguments:
             job_id (Optional[str]): Name of the job.
             job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated job ID.
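
Note: a load-from-GCS sketch relying on the documented string-destination behavior; the bucket, table, and CSV layout are hypothetical.

    from google.cloud import bigquery

    load_job = client.load_table_from_uri(
        "gs://my-bucket/data.csv",         # hypothetical source URI
        "my-project.my_dataset.my_table",  # parsed via TableReference.from_string
        job_config=bigquery.LoadJobConfig(
            source_format=bigquery.SourceFormat.CSV,
            skip_leading_rows=1,
        ),
    )
    load_job.result()  # block until the load completes
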
@@ -2415,39 +2417,42 @@ def load_table_from_file(
             returns a :class:`~google.cloud.bigquery.job.LoadJob`.
 
         Args:
-            file_obj:
+            file_obj (IO[bytes]):
                 A file handle opened in binary mode for reading.
-            destination:
+            destination (Union[Table, \
+                TableReference, \
+                TableListItem, \
+                str \
+            ]):
                 Table into which data is to be loaded. If a string is passed
                 in, this method attempts to create a table reference from a
                 string using
                 :func:`google.cloud.bigquery.table.TableReference.from_string`.
-
-        Keyword Arguments:
-            rewind:
+            rewind (Optional[bool]):
                 If True, seek to the beginning of the file handle before
-                reading the file.
-            size:
+                reading the file. Defaults to False.
+            size (Optional[int]):
                 The number of bytes to read from the file handle. If size is
                 ``None`` or large, resumable upload will be used. Otherwise,
                 multipart upload will be used.
-            num_retries: Number of upload retries. Defaults to 6.
-            job_id: Name of the job.
-            job_id_prefix:
+            num_retries (Optional[int]): Number of upload retries. Defaults to 6.
+            job_id (Optional[str]): Name of the job.
+            job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated job ID.
                 This parameter will be ignored if a ``job_id`` is also given.
-            location:
+            location (Optional[str]):
                 Location where to run the job. Must match the location of the
                 destination table.
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the job. Defaults
                 to the client's project.
-            job_config:
+            job_config (Optional[LoadJobConfig]):
                 Extra configuration options for the job.
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request
                 may be repeated several times using the same timeout each time.
+                Defaults to None.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
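
Note: a file-upload sketch matching the parameters above; the path and table are hypothetical.

    from google.cloud import bigquery

    with open("data.csv", "rb") as file_obj:  # binary mode, per ``file_obj (IO[bytes])``
        job = client.load_table_from_file(
            file_obj,
            "my-project.my_dataset.my_table",
            rewind=True,  # seek back to the start before reading
            job_config=bigquery.LoadJobConfig(
                source_format=bigquery.SourceFormat.CSV,
                skip_leading_rows=1,
            ),
        )
    job.result()
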
@@ -2535,9 +2540,13 @@ def load_table_from_dataframe(
             https://github.com/googleapis/python-bigquery/issues/19
 
         Args:
-            dataframe:
+            dataframe (pandas.DataFrame):
                 A :class:`~pandas.DataFrame` containing the data to load.
-            destination:
+            destination (Union[ \
+                Table, \
+                TableReference, \
+                str \
+            ]):
                 The destination table to use for loading the data. If it is an
                 existing table, the schema of the :class:`~pandas.DataFrame`
                 must match the schema of the destination table. If the table
@@ -2547,21 +2556,19 @@ def load_table_from_dataframe(
             If a string is passed in, this method attempts to create a
             table reference from a string using
             :func:`google.cloud.bigquery.table.TableReference.from_string`.
-
-        Keyword Arguments:
-            num_retries: Number of upload retries.
-            job_id: Name of the job.
-            job_id_prefix:
+            num_retries (Optional[int]): Number of upload retries. Defaults to 6.
+            job_id (Optional[str]): Name of the job.
+            job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated
                 job ID. This parameter will be ignored if a ``job_id`` is
                 also given.
-            location:
+            location (Optional[str]):
                 Location where to run the job. Must match the location of the
                 destination table.
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the job. Defaults
                 to the client's project.
-            job_config:
+            job_config (Optional[LoadJobConfig]):
                 Extra configuration options for the job.
 
                 To override the default pandas data type conversions, supply
@@ -2578,9 +2585,10 @@ def load_table_from_dataframe(
                 :attr:`~google.cloud.bigquery.job.SourceFormat.CSV` and
                 :attr:`~google.cloud.bigquery.job.SourceFormat.PARQUET` are
                 supported.
-            parquet_compression:
+            parquet_compression (Optional[str]):
                 [Beta] The compression method to use if intermittently
                 serializing ``dataframe`` to a parquet file.
+                Defaults to "snappy".
 
                 The argument is directly passed as the ``compression``
                 argument to the underlying ``pyarrow.parquet.write_table()``
@@ -2591,10 +2599,11 @@ def load_table_from_dataframe(
                 passed as the ``compression`` argument to the underlying
                 ``DataFrame.to_parquet()`` method.
                 https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_parquet.html#pandas.DataFrame.to_parquet
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request may
                 be repeated several times using the same timeout each time.
+                Defaults to None.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
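
Note: a DataFrame-load sketch exercising the documented default; the table name is hypothetical, and pandas plus pyarrow must be installed.

    import pandas

    df = pandas.DataFrame({"name": ["alpha", "beta"], "value": [1, 2]})
    job = client.load_table_from_dataframe(
        df,
        "my-project.my_dataset.my_table",
        parquet_compression="snappy",  # the documented default, shown explicitly
    )
    job.result()
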
@@ -2784,32 +2793,36 @@ def load_table_from_json(
                 client = bigquery.Client()
                 client.load_table_from_file(data_as_file, ...)
 
-            destination:
+            destination (Union[ \
+                Table, \
+                TableReference, \
+                TableListItem, \
+                str \
+            ]):
                 Table into which data is to be loaded. If a string is passed
                 in, this method attempts to create a table reference from a
                 string using
                 :func:`google.cloud.bigquery.table.TableReference.from_string`.
-
-        Keyword Arguments:
-            num_retries: Number of upload retries.
-            job_id: Name of the job.
-            job_id_prefix:
+            num_retries (Optional[int]): Number of upload retries. Defaults to 6.
+            job_id (Optional[str]): Name of the job.
+            job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated job ID.
                 This parameter will be ignored if a ``job_id`` is also given.
-            location:
+            location (Optional[str]):
                 Location where to run the job. Must match the location of the
                 destination table.
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the job. Defaults
                 to the client's project.
-            job_config:
+            job_config (Optional[LoadJobConfig]):
                 Extra configuration options for the job. The ``source_format``
                 setting is always set to
                 :attr:`~google.cloud.bigquery.job.SourceFormat.NEWLINE_DELIMITED_JSON`.
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request may
                 be repeated several times using the same timeout each time.
+                Defaults to None.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
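
Note: a row-dict sketch for this method; the table is hypothetical, and ``source_format`` is forced to NEWLINE_DELIMITED_JSON as documented above.

    rows = [
        {"name": "alpha", "value": 1},
        {"name": "beta", "value": 2},
    ]
    job = client.load_table_from_json(rows, "my-project.my_dataset.my_table")
    job.result()
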
@@ -2885,23 +2898,19 @@ def _do_resumable_upload(
         """Perform a resumable upload.
 
         Args:
-            stream: A bytes IO object open for reading.
-
-            metadata: The metadata associated with the upload.
-
-            num_retries:
+            stream (IO[bytes]): A bytes IO object open for reading.
+            metadata (Mapping[str, str]): The metadata associated with the upload.
+            num_retries (int):
                 Number of upload retries. (Deprecated: This
                 argument will be removed in a future release.)
-
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request may
                 be repeated several times using the same timeout each time.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
-
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the upload. Defaults
                 to the client's project.
@@ -2929,23 +2938,19 @@ def _initiate_resumable_upload(
         """Initiate a resumable upload.
 
         Args:
-            stream: A bytes IO object open for reading.
-
-            metadata: The metadata associated with the upload.
-
-            num_retries:
+            stream (IO[bytes]): A bytes IO object open for reading.
+            metadata (Mapping[str, str]): The metadata associated with the upload.
+            num_retries (int):
                 Number of upload retries. (Deprecated: This
                 argument will be removed in a future release.)
-
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request may
                 be repeated several times using the same timeout each time.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
-
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the upload. Defaults
                 to the client's project.
@@ -3005,28 +3010,23 @@ def _do_multipart_upload(
         """Perform a multipart upload.
 
         Args:
-            stream: A bytes IO object open for reading.
-
-            metadata: The metadata associated with the upload.
-
-            size:
+            stream (IO[bytes]): A bytes IO object open for reading.
+            metadata (Mapping[str, str]): The metadata associated with the upload.
+            size (int):
                 The number of bytes to be uploaded (which will be read
                 from ``stream``). If not provided, the upload will be
                 concluded once ``stream`` is exhausted (or :data:`None`).
-
-            num_retries:
+            num_retries (int):
                 Number of upload retries. (Deprecated: This
                 argument will be removed in a future release.)
-
-            timeout:
+            timeout (Optional[float]):
                 The number of seconds to wait for the underlying HTTP transport
                 before using ``retry``. Depending on the retry strategy, a request may
                 be repeated several times using the same timeout each time.
 
                 Can also be passed as a tuple (connect_timeout, read_timeout).
                 See :meth:`requests.Session.request` documentation for details.
-
-            project:
+            project (Optional[str]):
                 Project ID of the project of where to run the upload. Defaults
                 to the client's project.
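
Note: the ``size`` docstring in ``load_table_from_file`` above decides which of these two private helpers runs. A minimal sketch of that dispatch, assuming a 5 MiB multipart ceiling (the client keeps its own constant for this; the threshold here is an assumption):

    _MAX_MULTIPART_SIZE = 5 * 1024 * 1024  # assumed threshold, in bytes

    def _pick_upload_path(size):
        # Unknown or large payloads go through the resumable protocol;
        # small payloads of known size fit in a single multipart request.
        if size is None or size >= _MAX_MULTIPART_SIZE:
            return "resumable"
        return "multipart"
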
@@ -3118,8 +3118,6 @@ def copy_table(
                 str, \
             ]):
                 Table into which data is to be copied.
-
-        Keyword Arguments:
             job_id (Optional[str]): The ID of the job.
             job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated job ID.
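
Note: a table-copy sketch; both table paths are hypothetical.

    copy_job = client.copy_table(
        "my-project.my_dataset.source_table",
        "my-project.my_dataset.destination_table",
    )
    copy_job.result()  # wait for the copy to finish
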
@@ -3216,8 +3214,6 @@ def extract_table(
                 URIs of Cloud Storage file(s) into which table data is to be
                 extracted; in format
                 ``gs://<bucket_name>/<object_name_or_glob>``.
-
-        Keyword Arguments:
             job_id (Optional[str]): The ID of the job.
             job_id_prefix (Optional[str]):
                 The user-provided prefix for a randomly generated job ID.
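
Note: an export sketch using the documented URI glob form; the bucket and table are hypothetical.

    extract_job = client.extract_table(
        "my-project.my_dataset.my_table",
        "gs://my-bucket/export-*.csv",  # the * shards large exports across files
        location="US",
    )
    extract_job.result()
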
@@ -3306,8 +3302,6 @@ def query(
             query (str):
                 SQL query to be executed. Defaults to the standard SQL
                 dialect. Use the ``job_config`` parameter to change dialects.
-
-        Keyword Arguments:
             job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
                 Extra configuration options for the job.
                 To override any options that were previously set in
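
Note: a standard-SQL query sketch; the dataset and column names are hypothetical.

    from google.cloud import bigquery

    query_job = client.query(
        "SELECT name, SUM(value) AS total"
        " FROM `my-project.my_dataset.my_table` GROUP BY name",
        job_config=bigquery.QueryJobConfig(use_query_cache=False),
    )
    for row in query_job.result():  # blocks until the query finishes
        print(row["name"], row["total"])
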