Skip to content

Commit b2edaf1

Browse files
yinghsienwu authored and copybara-github committed
chore: Fix lint
PiperOrigin-RevId: 552860665
1 parent cb904d7 commit b2edaf1

23 files changed

+68
-74
lines changed

.pre-commit-config.yaml

+1-1
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,6 @@ repos:
2626
hooks:
2727
- id: black
2828
- repo: https://github.com/pycqa/flake8
29-
rev: 3.9.2
29+
rev: 6.1.0
3030
hooks:
3131
- id: flake8

google/cloud/aiplatform/jobs.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -685,7 +685,7 @@ def create(
685685
else:
686686
input_config.instances_format = instances_format
687687
input_config.gcs_source = gca_io_compat.GcsSource(
688-
uris=gcs_source if type(gcs_source) == list else [gcs_source]
688+
uris=gcs_source if isinstance(gcs_source, list) else [gcs_source]
689689
)
690690

691691
if bigquery_destination_prefix:

noxfile.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525

2626
import nox
2727

28+
FLAKE8_VERSION = "flake8==6.1.0"
2829
BLACK_VERSION = "black==22.3.0"
2930
ISORT_VERSION = "isort==5.10.1"
3031
LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
@@ -85,7 +86,7 @@ def lint(session):
8586
Returns a failure if the linters find linting errors or sufficiently
8687
serious code quality issues.
8788
"""
88-
session.install("flake8", BLACK_VERSION)
89+
session.install(FLAKE8_VERSION, BLACK_VERSION)
8990
session.run(
9091
"black",
9192
"--check",

samples/snippets/conftest.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -282,7 +282,9 @@ def teardown_batch_read_feature_values(shared_state, bigquery_client):
282282
def create_endpoint(shared_state, endpoint_client):
283283
def create(project, location, test_name="temp_deploy_model_test"):
284284
parent = f"projects/{project}/locations/{location}"
285-
endpoint = aiplatform.gapic.Endpoint(display_name=f"{test_name}_{uuid4()}",)
285+
endpoint = aiplatform.gapic.Endpoint(
286+
display_name=f"{test_name}_{uuid4()}",
287+
)
286288
create_endpoint_response = endpoint_client.create_endpoint(
287289
parent=parent, endpoint=endpoint
288290
)

samples/snippets/helpers.py

+7-7
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424

2525

2626
def get_name(out, key="name"):
27-
pattern = re.compile(fr'{key}:\s*"([\-a-zA-Z0-9/]+)"')
27+
pattern = re.compile(rf'{key}:\s*"([\-a-zA-Z0-9/]+)"')
2828
name = re.search(pattern, out).group(1)
2929

3030
return name
@@ -38,7 +38,7 @@ def get_state(out):
3838

3939

4040
def get_featurestore_resource_name(out, key="name"):
41-
pattern = re.compile(fr'{key}:\s*"([\_\-a-zA-Z0-9/]+)"')
41+
pattern = re.compile(rf'{key}:\s*"([\_\-a-zA-Z0-9/]+)"')
4242
name = re.search(pattern, out).group(1)
4343

4444
return name
@@ -51,7 +51,7 @@ def wait_for_job_state(
5151
timeout: int = 90,
5252
freq: float = 1.5,
5353
) -> None:
54-
""" Waits until the Job state of provided resource name is a particular state.
54+
"""Waits until the Job state of provided resource name is a particular state.
5555
5656
Args:
5757
get_job_method: Callable[[str], "proto.Message"]
@@ -91,12 +91,12 @@ def flaky_test_diagnostic(file_name, test_name, N=20):
9191
timing_dict = collections.defaultdict(list)
9292
for ri in range(N):
9393
start = timer()
94-
result = pytest.main(['-s', f'{file_name}::{test_name}'])
94+
result = pytest.main(["-s", f"{file_name}::{test_name}"])
9595
end = timer()
96-
delta = end-start
96+
delta = end - start
9797
if result == pytest.ExitCode.OK:
98-
timing_dict['SUCCESS'].append(delta)
98+
timing_dict["SUCCESS"].append(delta)
9999
else:
100-
timing_dict['FAILURE'].append(delta)
100+
timing_dict["FAILURE"].append(delta)
101101

102102
return timing_dict

samples/snippets/noxfile.py

+8-7
Original file line numberDiff line numberDiff line change
@@ -160,6 +160,7 @@ def blacken(session: nox.sessions.Session) -> None:
160160
# format = isort + black
161161
#
162162

163+
163164
@nox.session
164165
def format(session: nox.sessions.Session) -> None:
165166
"""
@@ -187,7 +188,9 @@ def _session_tests(
187188
session: nox.sessions.Session, post_install: Callable = None
188189
) -> None:
189190
# check for presence of tests
190-
test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True)
191+
test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob(
192+
"**/test_*.py", recursive=True
193+
)
191194
test_list.extend(glob.glob("**/tests", recursive=True))
192195

193196
if len(test_list) == 0:
@@ -209,9 +212,7 @@ def _session_tests(
209212

210213
if os.path.exists("requirements-test.txt"):
211214
if os.path.exists("constraints-test.txt"):
212-
session.install(
213-
"-r", "requirements-test.txt", "-c", "constraints-test.txt"
214-
)
215+
session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt")
215216
else:
216217
session.install("-r", "requirements-test.txt")
217218
with open("requirements-test.txt") as rtfile:
@@ -224,9 +225,9 @@ def _session_tests(
224225
post_install(session)
225226

226227
if "pytest-parallel" in packages:
227-
concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto'])
228+
concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"])
228229
elif "pytest-xdist" in packages:
229-
concurrent_args.extend(['-n', 'auto'])
230+
concurrent_args.extend(["-n", "auto"])
230231

231232
session.run(
232233
"pytest",
@@ -256,7 +257,7 @@ def py(session: nox.sessions.Session) -> None:
256257

257258

258259
def _get_repo_root() -> Optional[str]:
259-
""" Returns the root folder of the project. """
260+
"""Returns the root folder of the project."""
260261
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
261262
p = Path(os.getcwd())
262263
for i in range(10):

samples/snippets/noxfile_config.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,6 @@
3030
# secrets here. These values will override predefined values.
3131
"envs": {
3232
"DATA_LABELING_API_ENDPOINT": "us-central1-autopush-aiplatform.sandbox.googleapis.com",
33-
"PYTEST_ADDOPTS": "-n=auto" # Run tests parallel using all available CPUs
33+
"PYTEST_ADDOPTS": "-n=auto", # Run tests parallel using all available CPUs
3434
},
3535
}

samples/snippets/prediction_service/explain_tabular_sample_test.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -34,4 +34,4 @@ def test_ucaip_generated_explain_tabular_sample(capsys):
3434
)
3535

3636
out, _ = capsys.readouterr()
37-
assert 'attribution' in out
37+
assert "attribution" in out

samples/snippets/prediction_service/predict_custom_trained_model_sample.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ def predict_custom_trained_model_sample(
3737
# This client only needs to be created once, and can be reused for multiple requests.
3838
client = aiplatform.gapic.PredictionServiceClient(client_options=client_options)
3939
# The format of each instance should conform to the deployed model's prediction input schema.
40-
instances = instances if type(instances) == list else [instances]
40+
instances = instances if isinstance(instances, list) else [instances]
4141
instances = [
4242
json_format.ParseDict(instance_dict, Value()) for instance_dict in instances
4343
]

samples/snippets/prediction_service/predict_image_classification_sample.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,8 @@ def predict_image_classification_sample(
4242
instances = [instance]
4343
# See gs://google-cloud-aiplatform/schema/predict/params/image_classification_1.0.0.yaml for the format of the parameters.
4444
parameters = predict.params.ImageClassificationPredictionParams(
45-
confidence_threshold=0.5, max_predictions=5,
45+
confidence_threshold=0.5,
46+
max_predictions=5,
4647
).to_value()
4748
endpoint = client.endpoint_path(
4849
project=project, location=location, endpoint=endpoint_id

samples/snippets/prediction_service/predict_image_classification_sample_test.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -31,4 +31,4 @@ def test_ucaip_generated_predict_image_classification_sample(capsys):
3131
)
3232

3333
out, _ = capsys.readouterr()
34-
assert 'deployed_model_id:' in out
34+
assert "deployed_model_id:" in out

samples/snippets/prediction_service/predict_image_object_detection_sample.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,8 @@ def predict_image_object_detection_sample(
4242
instances = [instance]
4343
# See gs://google-cloud-aiplatform/schema/predict/params/image_object_detection_1.0.0.yaml for the format of the parameters.
4444
parameters = predict.params.ImageObjectDetectionPredictionParams(
45-
confidence_threshold=0.5, max_predictions=5,
45+
confidence_threshold=0.5,
46+
max_predictions=5,
4647
).to_value()
4748
endpoint = client.endpoint_path(
4849
project=project, location=location, endpoint=endpoint_id

samples/snippets/prediction_service/predict_image_object_detection_sample_test.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -31,4 +31,4 @@ def test_ucaip_generated_predict_image_object_detection_sample(capsys):
3131
)
3232

3333
out, _ = capsys.readouterr()
34-
assert 'Salad' in out
34+
assert "Salad" in out

samples/snippets/prediction_service/predict_tabular_classification_sample_test.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -35,4 +35,4 @@ def test_ucaip_generated_predict_tabular_classification_sample(capsys):
3535
)
3636

3737
out, _ = capsys.readouterr()
38-
assert 'setosa' in out
38+
assert "setosa" in out

tests/system/aiplatform/test_featurestore.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -583,7 +583,7 @@ def test_batch_serve_to_df(self, shared_state, caplog):
583583
"average_rating",
584584
]
585585

586-
assert type(df) == pd.DataFrame
586+
assert isinstance(df, pd.DataFrame)
587587
assert list(df.columns) == expected_df_columns
588588
assert df.size == 54
589589
assert "Featurestore feature values served." in caplog.text
@@ -699,16 +699,16 @@ def test_online_reads(self, shared_state):
699699
movie_entity_type = shared_state["movie_entity_type"]
700700

701701
user_entity_views = user_entity_type.read(entity_ids="alice")
702-
assert type(user_entity_views) == pd.DataFrame
702+
assert isinstance(user_entity_views, pd.DataFrame)
703703

704704
movie_entity_views = movie_entity_type.read(
705705
entity_ids=["movie_01", "movie_04"],
706706
feature_ids=[_TEST_MOVIE_TITLE_FEATURE_ID, _TEST_MOVIE_GENRES_FEATURE_ID],
707707
)
708-
assert type(movie_entity_views) == pd.DataFrame
708+
assert isinstance(movie_entity_views, pd.DataFrame)
709709

710710
movie_entity_views = movie_entity_type.read(
711711
entity_ids="movie_01",
712712
feature_ids=[_TEST_MOVIE_TITLE_FEATURE_ID, _TEST_MOVIE_GENRES_FEATURE_ID],
713713
)
714-
assert type(movie_entity_views) == pd.DataFrame
714+
assert isinstance(movie_entity_views, pd.DataFrame)

tests/unit/aiplatform/test_autologging.py

-12
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,6 @@
3535
from google.cloud.aiplatform import initializer
3636
from google.cloud.aiplatform import base
3737
from google.cloud.aiplatform_v1 import (
38-
AddContextArtifactsAndExecutionsResponse,
3938
Artifact as GapicArtifact,
4039
Context as GapicContext,
4140
Execution as GapicExecution,
@@ -395,17 +394,6 @@ def add_context_children_mock():
395394
yield add_context_children_mock
396395

397396

398-
@pytest.fixture
399-
def add_context_artifacts_and_executions_mock():
400-
with patch.object(
401-
MetadataServiceClient, "add_context_artifacts_and_executions"
402-
) as add_context_artifacts_and_executions_mock:
403-
add_context_artifacts_and_executions_mock.return_value = (
404-
AddContextArtifactsAndExecutionsResponse()
405-
)
406-
yield add_context_artifacts_and_executions_mock
407-
408-
409397
@pytest.fixture
410398
def get_tensorboard_run_not_found_mock():
411399
with patch.object(

tests/unit/aiplatform/test_datasets.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -1444,7 +1444,7 @@ def test_list_dataset(self, list_datasets_mock):
14441444
assert len(ds_list) < len(_TEST_DATASET_LIST)
14451445

14461446
for ds in ds_list:
1447-
assert type(ds) == aiplatform.TabularDataset
1447+
assert isinstance(ds, aiplatform.TabularDataset)
14481448

14491449
def test_list_dataset_no_order_or_filter(self, list_datasets_mock):
14501450

@@ -1456,7 +1456,7 @@ def test_list_dataset_no_order_or_filter(self, list_datasets_mock):
14561456
assert len(ds_list) < len(_TEST_DATASET_LIST)
14571457

14581458
for ds in ds_list:
1459-
assert type(ds) == aiplatform.TabularDataset
1459+
assert isinstance(ds, aiplatform.TabularDataset)
14601460

14611461
@pytest.mark.usefixtures("get_dataset_tabular_missing_metadata_mock")
14621462
def test_tabular_dataset_column_name_missing_metadata(self):

tests/unit/aiplatform/test_deployment_resource_pools.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -324,7 +324,7 @@ def test_list(self, list_drp_mock):
324324
list_drp_mock.assert_called_once()
325325

326326
for drp in drp_list:
327-
assert type(drp) == models.DeploymentResourcePool
327+
assert isinstance(drp, models.DeploymentResourcePool)
328328

329329
@pytest.mark.usefixtures("delete_drp_mock", "get_drp_mock")
330330
@pytest.mark.parametrize("sync", [True, False])

tests/unit/aiplatform/test_endpoints.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -1999,7 +1999,7 @@ def test_list_endpoint_order_by_time(self, list_endpoints_mock):
19991999
assert len(ep_list) == len(_TEST_ENDPOINT_LIST)
20002000

20012001
for ep in ep_list:
2002-
assert type(ep) == aiplatform.Endpoint
2002+
assert isinstance(ep, aiplatform.Endpoint)
20032003

20042004
assert ep_list[0].create_time > ep_list[1].create_time > ep_list[2].create_time
20052005

@@ -2018,7 +2018,7 @@ def test_list_endpoint_order_by_display_name(self, list_endpoints_mock):
20182018
assert len(ep_list) == len(_TEST_ENDPOINT_LIST)
20192019

20202020
for ep in ep_list:
2021-
assert type(ep) == aiplatform.Endpoint
2021+
assert isinstance(ep, aiplatform.Endpoint)
20222022

20232023
assert (
20242024
ep_list[0].display_name < ep_list[1].display_name < ep_list[2].display_name

0 commit comments

Comments (0)