Commit 4a50182

Fix some failing tests
1 parent 13a2455 commit 4a50182

4 files changed (+94, -80 lines)

hpc_provisioner/src/hpc_provisioner/aws_queries.py

Lines changed: 1 addition & 0 deletions
@@ -394,6 +394,7 @@ def list_all_fsx(fsx_client) -> list:
         else:
             file_systems = fsx_client.describe_file_systems()
         next_token = file_systems.get("NextToken")
+        logger.debug(f"Checking for next_token in file systems: {file_systems}")
         go_on = next_token is not None
         all_fsx.extend(file_systems["FileSystems"])
     return all_fsx
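
Note: the added debug line lands inside the NextToken pagination loop of list_all_fsx. A minimal sketch of how that loop presumably reads with the new line in place; the scaffolding outside the context lines shown above is an assumption, not the repository's exact code.

import logging

import boto3

logger = logging.getLogger(__name__)


def list_all_fsx(fsx_client) -> list:
    """Collect every FSx file system, following boto3's NextToken pagination."""
    all_fsx = []       # assumption: initialisation is not shown in the diff context
    next_token = None  # assumption: initialisation is not shown in the diff context
    go_on = True
    while go_on:
        if next_token:
            file_systems = fsx_client.describe_file_systems(NextToken=next_token)
        else:
            file_systems = fsx_client.describe_file_systems()
        next_token = file_systems.get("NextToken")
        logger.debug(f"Checking for next_token in file systems: {file_systems}")
        go_on = next_token is not None
        all_fsx.extend(file_systems["FileSystems"])
    return all_fsx


if __name__ == "__main__":
    # Example usage; requires AWS credentials with FSx read access.
    print(list_all_fsx(boto3.client("fsx")))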

hpc_provisioner/src/hpc_provisioner/pcluster_manager.py

Lines changed: 3 additions & 0 deletions
@@ -303,8 +303,11 @@ def pcluster_delete(cluster: Cluster):
     release_subnets(cluster.name)
     fsx_client = boto3.client("fsx")
     fs = get_fsx(fsx_client=fsx_client, fs_name=cluster.name)
+    logger.debug(f"Filesystems: {fs}")
     if fs:
+        logger.debug("Found filesystem - deleting")
         delete_fsx(fsx_client, fs["FileSystemId"])
+        logger.debug("Filesystem deleted")
     remove_key(get_keypair_name(cluster))
     remove_key(get_keypair_name(cluster, "sim"))
     return pc.delete_cluster(cluster_name=cluster.name, region=REGION)
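
The new log lines bracket the filesystem lookup and deletion in pcluster_delete. Below is a hypothetical sketch of the helpers on that path, for illustration only; just the call shapes (exact Name-tag match, and the FileSystemId reused as ClientRequestToken) come from the updated test assertions further down.

# Hypothetical sketch, not the repository's implementation of get_fsx/delete_fsx.
def get_fsx(fsx_client, fs_name):
    """Return the first file system whose Name tag equals fs_name, or None."""
    for fs in fsx_client.describe_file_systems()["FileSystems"]:
        tags = {tag["Key"]: tag["Value"] for tag in fs.get("Tags", [])}
        if tags.get("Name") == fs_name:
            return fs
    return None


def delete_fsx(fsx_client, file_system_id):
    """Delete the file system, reusing its id as the idempotency token."""
    fsx_client.delete_file_system(
        FileSystemId=file_system_id, ClientRequestToken=file_system_id
    )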

hpc_provisioner/tests/test_aws_queries.py

Lines changed: 9 additions & 35 deletions
@@ -18,7 +18,6 @@
     get_available_subnet,
     get_efs,
     get_fsx_by_id,
-    get_fsx_name,
     get_secret,
     get_security_group,
     remove_key,
@@ -426,40 +425,18 @@ def test_remove_key(patched_boto3):
     )


-@pytest.mark.parametrize("shared", [True, False])
-def test_get_fsx_name(shared, test_cluster):
-    test_fs_name = "test"
-    fsx_name = get_fsx_name(shared=shared, fs_name=test_fs_name, cluster=test_cluster)
-
-    if shared:
-        assert fsx_name == test_fs_name
-    else:
-        assert (
-            fsx_name == f"{test_fs_name}-pcluster-{test_cluster.vlab_id}-{test_cluster.project_id}"
-        )
-
-
-@pytest.mark.parametrize("shared", [True, False])
-def test_create_fsx(shared, test_cluster):
+def test_create_fsx(test_cluster):
     patched_fsx_client = MagicMock()
     retval = {"fs": "fs"}
     patched_fsx_client.create_file_system.return_value = retval
     test_fs_name = "test"
-    if shared:
-        expected_args = {"ClientRequestToken": test_fs_name}
-        expected_tags = [
-            {"Key": BILLING_TAG_KEY, "Value": BILLING_TAG_VALUE},
-            {"Key": "Name", "Value": test_fs_name},
-        ]
-    else:
-        token = f"{test_fs_name}-{test_cluster.name}"
-        expected_args = {"ClientRequestToken": token}
-        expected_tags = [
-            {"Key": BILLING_TAG_KEY, "Value": BILLING_TAG_VALUE},
-            {"Key": "Name", "Value": token},
-            {"Key": VLAB_TAG_KEY, "Value": test_cluster.vlab_id},
-            {"Key": PROJECT_TAG_KEY, "Value": test_cluster.project_id},
-        ]
+    expected_args = {"ClientRequestToken": test_fs_name}
+    expected_tags = [
+        {"Key": BILLING_TAG_KEY, "Value": BILLING_TAG_VALUE},
+        {"Key": VLAB_TAG_KEY, "Value": test_cluster.vlab_id},
+        {"Key": PROJECT_TAG_KEY, "Value": test_cluster.project_id},
+        {"Key": "Name", "Value": test_fs_name},
+    ]

     expected_args["FileSystemType"] = "LUSTRE"
     expected_args["StorageCapacity"] = 19200
@@ -475,10 +452,7 @@ def test_create_fsx(shared, test_cluster):
         "EfaEnabled": True,
         "MetadataConfiguration": {"Mode": "AUTOMATIC"},
     }
-    if shared:
-        fsx = create_fsx(patched_fsx_client, test_fs_name, shared)
-    else:
-        fsx = create_fsx(patched_fsx_client, test_fs_name, shared, test_cluster)
+    fsx = create_fsx(patched_fsx_client, test_fs_name, test_cluster)

     patched_fsx_client.create_file_system.assert_called_once_with(**expected_args)
     assert fsx == retval
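
With the shared flag dropped, test_create_fsx now exercises a single call shape: the filesystem name as the idempotency token and vlab/project tags always present. A minimal sketch of a create_fsx consistent with the expected_args and expected_tags above; this is an illustration reconstructed from the test, not the package's exact implementation, and the remaining keyword arguments asserted by the test are elided.

from hpc_provisioner.constants import (
    BILLING_TAG_KEY,
    BILLING_TAG_VALUE,
    PROJECT_TAG_KEY,
    VLAB_TAG_KEY,
)


def create_fsx(fsx_client, fs_name, cluster):
    """Create a Lustre FSx filesystem named fs_name, tagged for the cluster."""
    return fsx_client.create_file_system(
        ClientRequestToken=fs_name,
        FileSystemType="LUSTRE",
        StorageCapacity=19200,
        Tags=[
            {"Key": BILLING_TAG_KEY, "Value": BILLING_TAG_VALUE},
            {"Key": VLAB_TAG_KEY, "Value": cluster.vlab_id},
            {"Key": PROJECT_TAG_KEY, "Value": cluster.project_id},
            {"Key": "Name", "Value": fs_name},
        ],
        # StorageType, SubnetIds, SecurityGroupIds and LustreConfiguration
        # (EfaEnabled, MetadataConfiguration, ...) as asserted by the test.
    )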

hpc_provisioner/tests/test_resource_provisioner.py

Lines changed: 81 additions & 45 deletions
@@ -9,7 +9,13 @@

 from hpc_provisioner import handlers, pcluster_manager
 from hpc_provisioner.cluster import Cluster, ClusterJSONEncoder
-from hpc_provisioner.constants import FILESYSTEMS
+from hpc_provisioner.constants import (
+    BILLING_TAG_KEY,
+    BILLING_TAG_VALUE,
+    DRAS,
+    PROJECT_TAG_KEY,
+    VLAB_TAG_KEY,
+)
 from hpc_provisioner.pcluster_manager import InvalidRequest

 logger = logging.getLogger("test_logger")
@@ -234,23 +240,36 @@ def test_post(patched_boto3, patched_dynamodb_resource, post_event, key_exists,
 )
 @patch("hpc_provisioner.aws_queries.free_subnet")
 @patch("hpc_provisioner.pcluster_manager.remove_key")
+@patch("hpc_provisioner.pcluster_manager.boto3")
 @patch("hpc_provisioner.handlers.dynamodb_resource")
 def test_delete(
     patched_dynamodb_resource,
+    patched_boto3,
     patched_remove_key,
     patched_free_subnet,
     patched_dynamodb_client,
     data,
     delete_event,
     test_cluster,
 ):
+    mock_fsx_client = MagicMock()
+    mock_fsx_client.describe_file_systems.return_value = {
+        "FileSystems": [
+            {"FileSystemId": "fs-123", "Tags": [{"Key": "Name", "Value": test_cluster.name}]},
+            {
+                "FileSystemId": "fs-234",
+                "Tags": [{"Key": "Name", "Value": f"{test_cluster.name}-2"}],
+            },
+        ]
+    }
+    patched_boto3.client.return_value = mock_fsx_client
     mock_resource = MagicMock()
     mock_table = MagicMock()
     mock_resource.Table.return_value = mock_table
     patched_dynamodb_resource.return_value = mock_resource

-    mock_client = MagicMock()
-    patched_dynamodb_client.return_value = mock_client
+    mock_dynamodb_client = MagicMock()
+    patched_dynamodb_client.return_value = mock_dynamodb_client
     with patch(
         "hpc_provisioner.pcluster_manager.pc.delete_cluster", return_value=data["deletingCluster"]
     ) as patched_delete_cluster:
@@ -270,10 +289,13 @@ def test_delete(
         assert actual_response == expected_response
         patched_get_registered_subnets.assert_called_once()
         assert patched_remove_key.call_count == 2
-        call1 = call(mock_client, "subnet-123")
-        call2 = call(mock_client, "subnet-234")
+        call1 = call(mock_dynamodb_client, "subnet-123")
+        call2 = call(mock_dynamodb_client, "subnet-234")
         patched_free_subnet.assert_has_calls([call1, call2], any_order=True)
         mock_table.delete_item.assert_called_once_with(Key={"name": test_cluster.name})
+        mock_fsx_client.delete_file_system.assert_called_once_with(
+            FileSystemId="fs-123", ClientRequestToken="fs-123"
+        )


 def test_get_not_found(get_event):
@@ -303,14 +325,19 @@ def test_get_internal_server_error(get_event):
     "hpc_provisioner.aws_queries.dynamodb_client",
 )
 @patch("hpc_provisioner.pcluster_manager.remove_key")
+@patch("hpc_provisioner.pcluster_manager.boto3")
 @patch("hpc_provisioner.handlers.dynamodb_resource")
 def test_delete_not_found(
     patched_dynamodb_resource,
+    patched_boto3,
     patched_remove_key,
     patched_dynamodb_client,
     delete_event,
     test_cluster,
 ):
+    mock_fsx_client = MagicMock()
+    mock_fsx_client.describe_file_systems.return_value = {"FileSystems": []}
+    patched_boto3.client.return_value = mock_fsx_client
     mock_resource = MagicMock()
     mock_table = MagicMock()
     mock_resource.Table.return_value = mock_table
@@ -332,14 +359,19 @@
     "hpc_provisioner.aws_queries.dynamodb_client",
 )
 @patch("hpc_provisioner.pcluster_manager.remove_key")
+@patch("hpc_provisioner.pcluster_manager.boto3")
 @patch("hpc_provisioner.handlers.dynamodb_resource")
 def test_delete_internal_server_error(
     patched_dynamodb_resource,
+    patched_boto3,
     patched_remove_key,
     patched_dynamodb_client,
     delete_event,
     test_cluster,
 ):
+    mock_fsx_client = MagicMock()
+    mock_fsx_client.describe_file_systems.return_value = {"FileSystems": []}
+    patched_boto3.client.return_value = mock_fsx_client
     mock_resource = MagicMock()
     mock_table = MagicMock()
     mock_resource.Table.return_value = mock_table
@@ -456,42 +488,62 @@ def test_load_tier(tier, is_valid):
 def test_dra_check_no_fs_created_yet(
     patched_do_cluster_create, patched_boto3, patched_dynamodb_resource
 ):
+    pending_clusters = [
+        Cluster(project_id="proj1", vlab_id="testing"),
+        Cluster(project_id="proj2", vlab_id="testing"),
+    ]
     mock_fsx_client = MagicMock()
     mock_fsx_client.describe_file_systems.side_effect = [
         {"FileSystems": []},
         {"FileSystems": []},
         {"FileSystems": []},
-        {"FileSystems": [{"Lifecycle": "CREATING"}]},
+        {"FileSystems": []},
+        {
+            "FileSystems": [
+                {
+                    "Lifecycle": "CREATING",
+                    "Tags": [{"Key": "Name", "Value": pending_clusters[0].name}],
+                }
+            ]
+        },
     ]
     file_system_id = "fs-123"
     mock_fsx_client.create_file_system.return_value = {
         "FileSystem": {"FileSystemId": file_system_id}
     }
     patched_boto3.client.return_value = mock_fsx_client
-    pending_clusters = [
-        Cluster(project_id="proj1", vlab_id="testing"),
-        Cluster(project_id="proj2", vlab_id="testing"),
-    ]
     with patch(
         "hpc_provisioner.handlers.get_unclaimed_clusters",
         return_value=pending_clusters,
     ):
         return_value = handlers.dra_check_handler({})
     assert return_value == {
         "statusCode": 200,
-        "body": f"Precreating fsx for cluster {pending_clusters[0].name}",
+        "headers": {"Content-Type": "application/json"},
+        "body": json.dumps(
+            {
+                pending_clusters[
+                    0
+                ].name: f"Precreating fsx for cluster {pending_clusters[0].name}",
+                pending_clusters[
+                    1
+                ].name: f"A filesystem is being created - skipping cluster {pending_clusters[1].name} for now",
+            }
+        ),
     }
     patched_do_cluster_create.assert_not_called()
     mock_fsx_client.create_file_system.assert_called_once_with(
-        ClientRequestToken="projects",
+        ClientRequestToken=pending_clusters[0].name,
         FileSystemType="LUSTRE",
         StorageCapacity=19200,
         StorageType="SSD",
         SubnetIds=json.loads(os.environ["FS_SUBNET_IDS"]),
         SecurityGroupIds=[os.environ["FS_SG_ID"]],
         Tags=[
-            {"Key": "SBO_Billing", "Value": "hpc:parallelcluster"},
-            {"Key": "Name", "Value": FILESYSTEMS[0]["name"]},
+            {"Key": BILLING_TAG_KEY, "Value": BILLING_TAG_VALUE},
+            {"Key": VLAB_TAG_KEY, "Value": pending_clusters[0].vlab_id},
+            {"Key": PROJECT_TAG_KEY, "Value": pending_clusters[0].project_id},
+            {"Key": "Name", "Value": pending_clusters[0].name},
         ],
         LustreConfiguration={
             "WeeklyMaintenanceStartTime": "6:05:00",
@@ -504,16 +556,16 @@ def test_dra_check_no_fs_created_yet(
     )
     mock_fsx_client.create_data_repository_association.assert_called_once_with(
         FileSystemId=file_system_id,
-        FileSystemPath=FILESYSTEMS[0]["mountpoint"],
+        FileSystemPath=DRAS[0]["mountpoint"],
         DataRepositoryPath=os.environ["SBO_NEXUSDATA_BUCKET"],
         BatchImportMetaDataOnCreate=True,
         ImportedFileChunkSize=1024,
         S3={"AutoImportPolicy": {"Events": ["NEW", "CHANGED", "DELETED"]}},
-        ClientRequestToken=f"{file_system_id}-testing-proj1",
+        ClientRequestToken=f"{file_system_id}-testing-proj1-projects",
         Tags=[
             {
                 "Key": "Name",
-                "Value": f"{file_system_id}-{FILESYSTEMS[0]['mountpoint']}",
+                "Value": f"{file_system_id}-{DRAS[0]['mountpoint']}",
             },
             {"Key": "SBO_Billing", "Value": "hpc:parallelcluster"},
             {"Key": "obp:costcenter:vlabid", "Value": pending_clusters[0].vlab_id},
@@ -522,32 +574,6 @@
     )


-@patch("hpc_provisioner.pcluster_manager.boto3")
-@patch("hpc_provisioner.handlers.do_cluster_create")
-def test_dra_check_common_fs_creating(patched_do_cluster_create, patched_boto3):
-    mock_fsx_client = MagicMock()
-    mock_fsx_client.describe_file_systems.return_value = {
-        "FileSystems": [{"Lifecycle": "CREATING", "Tags": [{"Key": "Name", "Value": "projects"}]}]
-    }
-    patched_boto3.client.return_value = mock_fsx_client
-    pending_clusters = [
-        Cluster(project_id="proj1", vlab_id="testing"),
-        Cluster(project_id="proj2", vlab_id="testing"),
-    ]
-    with patch(
-        "hpc_provisioner.handlers.get_unclaimed_clusters",
-        return_value=pending_clusters,
-    ):
-        return_value = handlers.dra_check_handler({})
-    assert return_value == {
-        "statusCode": 200,
-        "body": f"A filesystem is being created - skipping cluster {pending_clusters[0].name} for now",
-    }
-    patched_do_cluster_create.assert_not_called()
-    mock_fsx_client.create_file_system.assert_not_called()
-    mock_fsx_client.create_data_repository_association.assert_not_called()
-
-
 @patch("hpc_provisioner.pcluster_manager.boto3")
 @patch("hpc_provisioner.handlers.do_cluster_create")
 def test_dra_check_fs_for_other_cluster_creating(patched_do_cluster_create, patched_boto3):
@@ -556,7 +582,7 @@ def test_dra_check_fs_for_other_cluster_creating(patched_do_cluster_create, patc
         "FileSystems": [
             {
                 "Lifecycle": "CREATING",
-                "Tags": [{"Key": "Name", "Value": "scratch-pcluster-testing-proj2"}],
+                "Tags": [{"Key": "Name", "Value": "pcluster-testing-proj2"}],
             }
         ]
     }
@@ -572,7 +598,17 @@ def test_dra_check_fs_for_other_cluster_creating(patched_do_cluster_create, patc
         return_value = handlers.dra_check_handler({})
     assert return_value == {
         "statusCode": 200,
-        "body": f"A filesystem is being created - skipping cluster {pending_clusters[0].name} for now",
+        "headers": {"Content-Type": "application/json"},
+        "body": json.dumps(
+            {
+                pending_clusters[
+                    0
+                ].name: f"A filesystem is being created - skipping cluster {pending_clusters[0].name} for now",
+                pending_clusters[
+                    1
+                ].name: f"A filesystem is being created - skipping cluster {pending_clusters[1].name} for now",
+            }
+        ),
     }
     patched_do_cluster_create.assert_not_called()
     mock_fsx_client.create_file_system.assert_not_called()
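
The dra_check tests now expect a JSON body with one status message per pending cluster rather than a single string. A small sketch of the response shape those assertions imply; the build_dra_check_response helper is hypothetical, and only the statusCode, headers and json.dumps body structure come from the tests.

import json


def build_dra_check_response(statuses: dict) -> dict:
    """Assemble the per-cluster status response the updated tests expect."""
    return {
        "statusCode": 200,
        "headers": {"Content-Type": "application/json"},
        "body": json.dumps(statuses),
    }


# Example, assuming Cluster.name renders as "pcluster-<vlab_id>-<project_id>",
# as the Name tags asserted in these tests suggest:
response = build_dra_check_response(
    {
        "pcluster-testing-proj1": "Precreating fsx for cluster pcluster-testing-proj1",
        "pcluster-testing-proj2": (
            "A filesystem is being created - skipping cluster pcluster-testing-proj2 for now"
        ),
    }
)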
