# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import, print_function
import json
import logging
import os
import re
import uuid
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, Union, Optional, List
from packaging.specifiers import SpecifierSet
from packaging.version import Version
from six import string_types, with_metaclass
from six.moves.urllib.parse import urlparse
import sagemaker
from sagemaker import git_utils, image_uris, vpc_utils, s3
from sagemaker.analytics import TrainingJobAnalytics
from sagemaker.config import (
ESTIMATOR_DEBUG_HOOK_CONFIG_PATH,
TRAINING_JOB_VOLUME_KMS_KEY_ID_PATH,
TRAINING_JOB_SECURITY_GROUP_IDS_PATH,
TRAINING_JOB_SUBNETS_PATH,
TRAINING_JOB_KMS_KEY_ID_PATH,
TRAINING_JOB_ROLE_ARN_PATH,
TRAINING_JOB_ENABLE_NETWORK_ISOLATION_PATH,
TRAINING_JOB_ENVIRONMENT_PATH,
TRAINING_JOB_DISABLE_PROFILER_PATH,
TRAINING_JOB_INTER_CONTAINER_ENCRYPTION_PATH,
)
from sagemaker.debugger import ( # noqa: F401 # pylint: disable=unused-import
DEBUGGER_FLAG,
DebuggerHookConfig,
FrameworkProfile,
ProfilerConfig,
ProfilerRule,
Rule,
TensorBoardOutputConfig,
get_default_profiler_processing_job,
get_rule_container_image_uri,
RuleBase,
)
from sagemaker.deprecations import removed_function, removed_kwargs, renamed_kwargs
from sagemaker.fw_utils import (
UploadedCode,
_region_supports_debugger,
_region_supports_profiler,
_instance_type_supports_profiler,
get_mp_parameters,
tar_and_upload_dir,
validate_source_dir,
validate_source_code_input_against_pipeline_variables,
)
from sagemaker.inputs import TrainingInput, FileSystemInput
from sagemaker.interactive_apps import SupportedInteractiveAppTypes
from sagemaker.interactive_apps.tensorboard import TensorBoardApp
from sagemaker.instance_group import InstanceGroup
from sagemaker.model_card.model_card import ModelCard, TrainingDetails
from sagemaker.utils import instance_supports_kms
from sagemaker.job import _Job
from sagemaker.jumpstart.utils import (
add_jumpstart_uri_tags,
get_jumpstart_base_name_if_jumpstart_model,
update_inference_tags_with_jumpstart_training_tags,
)
from sagemaker.local import LocalSession
from sagemaker.model import (
CONTAINER_LOG_LEVEL_PARAM_NAME,
DIR_PARAM_NAME,
JOB_NAME_PARAM_NAME,
NEO_ALLOWED_FRAMEWORKS,
SAGEMAKER_REGION_PARAM_NAME,
SCRIPT_PARAM_NAME,
Model,
)
from sagemaker.predictor import Predictor
from sagemaker.s3 import S3Uploader, parse_s3_url
from sagemaker.session import Session
from sagemaker.transformer import Transformer
from sagemaker.utils import (
base_from_name,
base_name_from_image,
build_dict,
get_config_value,
name_from_base,
to_string,
check_and_get_run_experiment_config,
resolve_value_from_config,
format_tags,
Tags,
)
from sagemaker.workflow import is_pipeline_variable
from sagemaker.workflow.entities import PipelineVariable
from sagemaker.workflow.parameters import ParameterString
from sagemaker.workflow.pipeline_context import PipelineSession, runnable_by_pipeline
from sagemaker.telemetry.telemetry_logging import _telemetry_emitter
from sagemaker.telemetry.constants import Feature
logger = logging.getLogger(__name__)
class EstimatorBase(with_metaclass(ABCMeta, object)): # pylint: disable=too-many-public-methods
"""Handle end-to-end Amazon SageMaker training and deployment tasks.
For introduction to model training and deployment, see
http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html
Subclasses must define a way to determine what image to use for training,
what hyperparameters to use, and how to create an appropriate predictor
instance.
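As a minimal, illustrative sketch (assuming an existing IAM role ARN, a
training image, and a concrete subclass such as
:class:`~sagemaker.estimator.Estimator`; all values below are
hypothetical placeholders), a typical train-and-deploy flow looks like:
.. code:: python
    from sagemaker.estimator import Estimator
    estimator = Estimator(
        image_uri="<training-image-uri>",
        role="arn:aws:iam::111122223333:role/SageMakerRole",
        instance_count=1,
        instance_type="ml.m5.xlarge",
    )
    estimator.fit({"training": "s3://amzn-s3-demo-bucket/train"})
    predictor = estimator.deploy(initial_instance_count=1, instance_type="ml.m5.xlarge")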
"""
LAUNCH_PT_XLA_ENV_NAME = "sagemaker_pytorch_xla_multi_worker_enabled"
LAUNCH_PS_ENV_NAME = "sagemaker_parameter_server_enabled"
LAUNCH_MPI_ENV_NAME = "sagemaker_mpi_enabled"
LAUNCH_SM_DDP_ENV_NAME = "sagemaker_distributed_dataparallel_enabled"
LAUNCH_MWMS_ENV_NAME = "sagemaker_multi_worker_mirrored_strategy_enabled"
INSTANCE_TYPE = "sagemaker_instance_type"
MPI_NUM_PROCESSES_PER_HOST = "sagemaker_mpi_num_of_processes_per_host"
MPI_CUSTOM_MPI_OPTIONS = "sagemaker_mpi_custom_mpi_options"
SM_DDP_CUSTOM_MPI_OPTIONS = "sagemaker_distributed_dataparallel_custom_mpi_options"
CONTAINER_CODE_CHANNEL_SOURCEDIR_PATH = "/opt/ml/input/data/code/sourcedir.tar.gz"
JOB_CLASS_NAME = "training-job"
def __init__(
self,
role: str = None,
instance_count: Optional[Union[int, PipelineVariable]] = None,
instance_type: Optional[Union[str, PipelineVariable]] = None,
keep_alive_period_in_seconds: Optional[Union[int, PipelineVariable]] = None,
volume_size: Union[int, PipelineVariable] = 30,
volume_kms_key: Optional[Union[str, PipelineVariable]] = None,
max_run: Union[int, PipelineVariable] = 24 * 60 * 60,
input_mode: Union[str, PipelineVariable] = "File",
output_path: Optional[Union[str, PipelineVariable]] = None,
output_kms_key: Optional[Union[str, PipelineVariable]] = None,
base_job_name: Optional[str] = None,
sagemaker_session: Optional[Session] = None,
tags: Optional[Tags] = None,
subnets: Optional[List[Union[str, PipelineVariable]]] = None,
security_group_ids: Optional[List[Union[str, PipelineVariable]]] = None,
model_uri: Optional[str] = None,
model_channel_name: Union[str, PipelineVariable] = "model",
metric_definitions: Optional[List[Dict[str, Union[str, PipelineVariable]]]] = None,
encrypt_inter_container_traffic: Union[bool, PipelineVariable] = None,
use_spot_instances: Union[bool, PipelineVariable] = False,
max_wait: Optional[Union[int, PipelineVariable]] = None,
checkpoint_s3_uri: Optional[Union[str, PipelineVariable]] = None,
checkpoint_local_path: Optional[Union[str, PipelineVariable]] = None,
rules: Optional[List[RuleBase]] = None,
debugger_hook_config: Optional[Union[bool, DebuggerHookConfig]] = None,
tensorboard_output_config: Optional[TensorBoardOutputConfig] = None,
enable_sagemaker_metrics: Optional[Union[bool, PipelineVariable]] = None,
enable_network_isolation: Union[bool, PipelineVariable] = None,
profiler_config: Optional[ProfilerConfig] = None,
disable_profiler: bool = None,
environment: Optional[Dict[str, Union[str, PipelineVariable]]] = None,
max_retry_attempts: Optional[Union[int, PipelineVariable]] = None,
source_dir: Optional[Union[str, PipelineVariable]] = None,
git_config: Optional[Dict[str, str]] = None,
hyperparameters: Optional[Dict[str, Union[str, PipelineVariable]]] = None,
container_log_level: Union[int, PipelineVariable] = logging.INFO,
code_location: Optional[str] = None,
entry_point: Optional[Union[str, PipelineVariable]] = None,
dependencies: Optional[List[str]] = None,
instance_groups: Optional[List[InstanceGroup]] = None,
training_repository_access_mode: Optional[Union[str, PipelineVariable]] = None,
training_repository_credentials_provider_arn: Optional[Union[str, PipelineVariable]] = None,
enable_infra_check: Optional[Union[bool, PipelineVariable]] = None,
container_entry_point: Optional[List[str]] = None,
container_arguments: Optional[List[str]] = None,
disable_output_compression: bool = False,
enable_remote_debug: Optional[Union[bool, PipelineVariable]] = None,
enable_session_tag_chaining: Optional[Union[bool, PipelineVariable]] = None,
training_plan: Optional[Union[str, PipelineVariable]] = None,
**kwargs,
):
"""Initialize an ``EstimatorBase`` instance.
Args:
role (str): An AWS IAM role (either name or full ARN). The Amazon
SageMaker training jobs and APIs that create Amazon SageMaker
endpoints use this role to access training data and model
artifacts. After the endpoint is created, the inference code
might use the IAM role, if it needs to access an AWS resource.
instance_count (int or PipelineVariable): Number of Amazon EC2 instances to use
for training. Required if instance_groups is not set.
instance_type (str or PipelineVariable): Type of EC2 instance to use for training,
for example, ``'ml.c4.xlarge'``. Required if instance_groups is
not set.
keep_alive_period_in_seconds (int): The duration of time in seconds
to retain configured resources in a warm pool for subsequent
training jobs (default: None).
volume_size (int or PipelineVariable): Size in GB of the storage volume to use for
storing input and output data during training (default: 30).
Must be large enough to store training data if File mode is
used, which is the default mode.
When you use an ML instance with the EBS-only storage option
such as ``ml.c5`` and ``ml.p2``,
you must define the size of the EBS
volume through the ``volume_size`` parameter in the estimator class.
.. note::
When you use an ML instance with `NVMe SSD volumes
<https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html#nvme-ssd-volumes>`_
such as ``ml.p4d``, ``ml.g4dn``, and ``ml.g5``,
do not include this parameter in the estimator configuration.
If you use one of those ML instance types,
SageMaker doesn't provision Amazon EBS General Purpose SSD
(gp2) storage and doesn't use this parameter to adjust the NVMe instance storage.
Available storage is fixed to the NVMe instance storage
capacity. SageMaker configures storage paths for training
datasets, checkpoints, model artifacts, and outputs to use the
entire capacity of the instance storage.
Note that if you include this parameter and specify a number that
exceeds the size of the NVMe volume attached to the instance type,
SageMaker returns an ``Invalid VolumeSizeInGB`` error.
To look up instance types and their instance storage types
and volumes, see `Amazon EC2 Instance Types
<http://aws.amazon.com/ec2/instance-types/>`_.
To find the default local paths defined by the SageMaker
training platform, see `Amazon SageMaker Training Storage
Folders for Training Datasets, Checkpoints, Model Artifacts,
and Outputs
<https://docs.aws.amazon.com/sagemaker/latest/dg/model-train-storage.html>`_.
volume_kms_key (str or PipelineVariable): Optional. KMS key ID for encrypting EBS
volume attached to the training instance (default: None).
max_run (int or PipelineVariable): Timeout in seconds for training (default: 24 *
60 * 60). After this amount of time Amazon SageMaker terminates
the job regardless of its current status.
input_mode (str or PipelineVariable): The input mode that the algorithm supports
(default: 'File'). Valid modes:
'File' - Amazon SageMaker copies the training dataset from the
S3 location to a local directory.
'Pipe' - Amazon SageMaker streams data directly from S3 to the
container via a Unix-named pipe.
'FastFile' - Amazon SageMaker streams data from S3 on demand instead of
downloading the entire dataset before training begins. This argument can
be overridden on a per-channel basis using
``sagemaker.inputs.TrainingInput.input_mode``.
output_path (str or PipelineVariable): S3 location for saving the training result (model
artifacts and output files). If not specified, results are
stored to a default bucket. If the bucket with the specific name
does not exist, the estimator creates the bucket during the
:meth:`~sagemaker.estimator.EstimatorBase.fit` method execution.
``file://`` URLs are used for Local Mode. For example, 'file://model/'
saves to the model folder in the current directory.
output_kms_key (str or PipelineVariable): Optional. KMS key ID for encrypting the
training output (default: Your IAM role's KMS key for Amazon S3).
If you don't provide a KMS key ID, Amazon SageMaker uses the
default KMS key for Amazon S3 of the account linked to your
IAM role.
base_job_name (str): Prefix for training job name when the
:meth:`~sagemaker.estimator.EstimatorBase.fit` method launches.
If not specified, the estimator generates a default job name
based on the training image name and current timestamp.
sagemaker_session (sagemaker.session.Session): Session object which
manages interactions with Amazon SageMaker APIs and any other
AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
tags (Optional[Tags]):
Tags for labeling a training job. These won't be propagated to Models or
Endpoints during :meth:`~sagemaker.estimator.EstimatorBase.deploy`. The
:meth:`~sagemaker.estimator.EstimatorBase.deploy` method takes a separate
tags parameter. For more on tags, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
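For example (illustrative values):
.. code:: python
    tags = [{"Key": "project", "Value": "my-project"}]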
subnets (list[str] or list[PipelineVariable]): List of subnet ids. If not
specified, the training job will be created without VPC config.
security_group_ids (list[str] or list[PipelineVariable]): List of security group ids.
If not specified, the training job will be created without VPC config.
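Both must be provided together; setting only one of them raises a
``RuntimeError``. For example (hypothetical IDs):
.. code:: python
    subnets = ["subnet-0123456789abcdef0"]
    security_group_ids = ["sg-0123456789abcdef0"]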
model_uri (str): URI where a pre-trained model is stored, either
locally or in S3 (default: None). If specified, the estimator
will create a channel pointing to the model so the training job
can download it. This model can be a 'model.tar.gz' from a
previous training job, or other artifacts coming from a
different source.
In local mode, this should point to the path in which the model
is located and not the file itself, as local Docker containers
will try to mount the URI as a volume.
More information:
https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-training.html#td-deserialization
model_channel_name (str or PipelineVariable): Name of the channel where 'model_uri' will
be downloaded (default: 'model').
metric_definitions (list[dict[str, str]] or list[dict[str, PipelineVariable]]):
A list of dictionaries that defines the metric(s) used to evaluate the
training jobs. Each dictionary contains two keys: 'Name' for the name of the metric,
and 'Regex' for the regular expression used to extract the
metric from the logs. This should be defined only for jobs that
don't use an Amazon algorithm.
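For example, the following (illustrative) definition extracts a metric
named ``train:loss`` from log lines such as ``loss=0.25``:
.. code:: python
    metric_definitions = [
        {"Name": "train:loss", "Regex": "loss=([0-9\\.]+)"},
    ]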
encrypt_inter_container_traffic (bool or PipelineVariable): Specifies whether traffic
between training containers is encrypted for the training job
(default: ``False``).
use_spot_instances (bool or PipelineVariable): Specifies whether to use SageMaker
Managed Spot instances for training. If enabled, the
``max_wait`` arg should also be set.
More information:
https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html
(default: ``False``).
max_wait (int or PipelineVariable): Timeout in seconds waiting for the spot training
job (default: None). After this amount of time Amazon
SageMaker stops waiting for the managed spot training job to
complete.
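For example, a managed spot setup with a one-hour runtime cap and up
to two hours of total waiting (illustrative values; ``max_wait`` must
be at least as large as ``max_run``):
.. code:: python
    use_spot_instances = True
    max_run = 3600
    max_wait = 7200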
checkpoint_s3_uri (str or PipelineVariable): The S3 URI in which to persist checkpoints
that the algorithm generates (if any) during training. (default:
``None``).
checkpoint_local_path (str or PipelineVariable): The local path that the algorithm
writes its checkpoints to. SageMaker will persist all files
under this path to `checkpoint_s3_uri` continually during
training. On job startup the reverse happens - data from the
s3 location is downloaded to this path before the algorithm is
started. If the path is unset then SageMaker assumes the
checkpoints will be provided under `/opt/ml/checkpoints/`.
(default: None).
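For example (illustrative S3 URI; the local path below matches the
documented default):
.. code:: python
    checkpoint_s3_uri = "s3://amzn-s3-demo-bucket/my-job/checkpoints"
    checkpoint_local_path = "/opt/ml/checkpoints/"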
rules (list[:class:`~sagemaker.debugger.RuleBase`]): A list of
:class:`~sagemaker.debugger.RuleBase` objects used to define
SageMaker Debugger rules for real-time analysis
(default: ``None``). For more information,
see `Continuous analyses through rules
<https://sagemaker.readthedocs.io/en/stable/amazon_sagemaker_debugger.html
#continuous-analyses-through-rules>`_.
debugger_hook_config (:class:`~sagemaker.debugger.DebuggerHookConfig` or bool):
Configuration for how debugging information is emitted with
SageMaker Debugger. If not specified, a default one is created using
the estimator's ``output_path``, unless the region does not
support SageMaker Debugger. To disable SageMaker Debugger,
set this parameter to ``False``. For more information, see
`Capture real-time debugging data during model training in Amazon SageMaker
<https://sagemaker.readthedocs.io/en/stable/amazon_sagemaker_debugger.html#
capture-real-time-debugging-data-during-model-training-in-amazon-sagemaker>`_.
tensorboard_output_config (:class:`~sagemaker.debugger.TensorBoardOutputConfig`):
Configuration for customizing debugging visualization using TensorBoard
(default: None). For more information,
see `Capture real time tensorboard data
<https://sagemaker.readthedocs.io/en/stable/amazon_sagemaker_debugger.html#
capture-real-time-tensorboard-data-from-the-debugging-hook>`_.
enable_sagemaker_metrics (bool or PipelineVariable): Enables SageMaker Metrics Time
Series. For more information, see `AlgorithmSpecification API
<https://docs.aws.amazon.com/sagemaker/latest/dg/
API_AlgorithmSpecification.html#SageMaker-Type-AlgorithmSpecification-
EnableSageMakerMetricsTimeSeries>`_.
(default: None).
enable_network_isolation (bool or PipelineVariable): Specifies whether container will
run in network isolation mode (default: ``False``). Network
isolation mode restricts the container access to outside networks
(such as the Internet). The container does not make any inbound or
outbound network calls. Also known as Internet-free mode.
profiler_config (:class:`~sagemaker.debugger.ProfilerConfig`):
Configuration for how SageMaker Debugger collects
monitoring and profiling information from your training job.
If not specified, a default configuration is created using
the estimator's ``output_path``, unless the region does not
support SageMaker Debugger. To disable SageMaker Debugger
monitoring and profiling, set the
``disable_profiler`` parameter to ``True``.
disable_profiler (bool): Specifies whether Debugger monitoring and profiling
will be disabled (default: ``False``).
environment (dict[str, str] or dict[str, PipelineVariable]): Environment variables
to be set for use during the training job (default: None).
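For example (illustrative variables):
.. code:: python
    environment = {"LOG_LEVEL": "debug", "DATA_FORMAT": "csv"}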
max_retry_attempts (int or PipelineVariable): The number of times to move a job
to the STARTING status. You can specify between 1 and 30 attempts.
If the value is greater than zero, the job is retried on
``InternalServerFailure`` up to that number of times. You can cap the
total duration of your job by setting ``max_wait`` and ``max_run``
(default: None).
source_dir (str or PipelineVariable): The absolute, relative, or S3 URI Path to
a directory with any other training source code dependencies aside from the entry
point file (default: None). If ``source_dir`` is an S3 URI, it must
point to a file with name ``sourcedir.tar.gz``. The structure within this directory
is preserved when training on Amazon SageMaker. If 'git_config' is provided,
'source_dir' should be a relative location to a directory in the Git
repo.
With the following GitHub repo directory structure:
.. code::
|----- README.md
|----- src
|----- train.py
|----- test.py
if you need 'train.py' as the entry point and 'test.py' as part of
the training source code, you can assign
entry_point='train.py' and source_dir='src'.
git_config (dict[str, str]): Git configurations used for cloning
files, including ``repo``, ``branch``, ``commit``,
``2FA_enabled``, ``username``, ``password``, and ``token``. The
``repo`` field is required. All other fields are optional.
``repo`` specifies the Git repository where your training script
is stored. If you don't provide ``branch``, the default value
'master' is used. If you don't provide ``commit``, the latest
commit in the specified branch is used. For example, the following config:
.. code:: python
git_config = {
'repo': 'https://github.com/aws/sagemaker-python-sdk.git',
'branch': 'test-branch-git-config',
'commit': '329bfcf884482002c05ff7f44f62599ebc9f445a'
}
results in cloning the repo specified in 'repo', then
checking out the specified branch and
commit.
``2FA_enabled``, ``username``, ``password``, and ``token`` are
used for authentication. For GitHub (or other Git) accounts, set
``2FA_enabled`` to 'True' if two-factor authentication is
enabled for the account, otherwise set it to 'False'. If you do
not provide a value for ``2FA_enabled``, a default value of
'False' is used. CodeCommit does not support two-factor
authentication, so do not provide "2FA_enabled" with CodeCommit
repositories.
For GitHub and other Git repos, when SSH URLs are provided, it
doesn't matter whether 2FA is enabled or disabled. You should
either have no passphrase for the SSH key pairs or have the
ssh-agent configured so that you will not be prompted for the SSH
passphrase when you run the 'git clone' command with SSH URLs. When
HTTPS URLs are provided, if 2FA is disabled, then either ``token``
or ``username`` and ``password`` are used for authentication if provided,
with ``token`` taking priority. If 2FA is enabled, only ``token`` is used
for authentication if provided. If required authentication info
is not provided, the SageMaker Python SDK attempts to use local credentials
to authenticate. If that fails, an error message is thrown.
For CodeCommit repos, 2FA is not supported, so '2FA_enabled'
should not be provided. There is no token in CodeCommit, so
``token`` should also not be provided. When ``repo`` is an SSH URL,
the requirements are the same as GitHub repos. When ``repo``
is an HTTPS URL, ``username`` and ``password`` are used for
authentication if they are provided. If they are not provided,
the SageMaker Python SDK attempts to use either the CodeCommit
credential helper or local credential storage for authentication.
hyperparameters (dict[str, str] or dict[str, PipelineVariable]):
A dictionary containing the hyperparameters to
initialize this estimator with. (Default: None).
.. caution::
You must not include any security-sensitive information, such as
account access IDs, secrets, and tokens, in the dictionary for configuring
hyperparameters. SageMaker rejects the training job request and returns a
validation error if such credentials are detected.
container_log_level (int or PipelineVariable): The log level to use within the container
(default: logging.INFO). Valid values are defined in the Python
logging module.
code_location (str): The S3 prefix URI where custom code is
uploaded (default: None). You must not include a trailing slash because
a string prepended with a "/" is appended to ``code_location``. The code
file uploaded to S3 is 'code_location/job-name/source/sourcedir.tar.gz'.
If not specified, the default ``code_location`` is 's3://output_bucket/job-name/'.
entry_point (str or PipelineVariable): The absolute or relative path to the local Python
source file that should be executed as the entry point to
training. (Default: None). If ``source_dir`` is specified, then ``entry_point``
must point to a file located at the root of ``source_dir``.
If 'git_config' is provided, 'entry_point' should be
a relative location to the Python source file in the Git repo.
Example:
With the following GitHub repo directory structure:
>>> |----- README.md
>>> |----- src
>>> |----- train.py
>>> |----- test.py
You can assign entry_point='src/train.py'.
dependencies (list[str]): A list of absolute or relative paths to directories
with any additional libraries that should be exported
to the container (default: []). The library folders are
copied to SageMaker in the same folder where the entrypoint is
copied. If 'git_config' is provided, 'dependencies' should be a
list of relative locations to directories with any additional
libraries needed in the Git repo.
.. admonition:: Example
The following Estimator call:
>>> Estimator(entry_point='train.py',
... dependencies=['my/libs/common', 'virtual-env'])
results in the following structure inside the container:
>>> $ ls
>>> opt/ml/code
>>> |------ train.py
>>> |------ common
>>> |------ virtual-env
This is not supported with "local code" in Local Mode.
instance_groups (list[:class:`sagemaker.instance_group.InstanceGroup`]):
Optional. A list of ``InstanceGroup`` objects
for launching a training job with a heterogeneous cluster.
For example:
.. code:: python
instance_groups=[
sagemaker.InstanceGroup(
'instance_group_name_1', 'ml.p3dn.24xlarge', 64),
sagemaker.InstanceGroup(
'instance_group_name_2', 'ml.c5n.18xlarge', 64)]
For instructions on how to use ``InstanceGroup`` objects
to configure a heterogeneous cluster
through the SageMaker generic and framework estimator classes, see
`Train Using a Heterogeneous Cluster
<https://docs.aws.amazon.com/sagemaker/latest/dg/train-heterogeneous-cluster.html>`_
in the *Amazon SageMaker developer guide*.
training_repository_access_mode (str): Optional. Specifies how SageMaker accesses the
Docker image that contains the training algorithm (default: None).
Set this to one of the following values:
* 'Platform' - The training image is hosted in Amazon ECR.
* 'Vpc' - The training image is hosted in a private Docker registry in your VPC.
When left as the default ``None``, the behavior is the same as 'Platform': the
image is hosted in Amazon ECR.
training_repository_credentials_provider_arn (str): Optional. The Amazon Resource Name
(ARN) of an AWS Lambda function that provides credentials to authenticate to the
private Docker registry where your training image is hosted (default: None).
When set to ``None``, SageMaker does not authenticate before pulling the image
from the private Docker registry.
container_entry_point (List[str]): Optional. The entrypoint script for a Docker
container used to run a training job. This script takes precedence over
the default train processing instructions.
container_arguments (List[str]): Optional. The arguments for a container used to run
a training job.
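For example (hypothetical script and arguments):
.. code:: python
    container_entry_point = ["/bin/bash", "run_training.sh"]
    container_arguments = ["--epochs", "10"]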
disable_output_compression (bool): Optional. When set to ``True``, the model is uploaded
to Amazon S3 without compression after training finishes.
enable_infra_check (bool or PipelineVariable): Optional.
Specifies whether to run SageMaker built-in infrastructure check jobs.
enable_remote_debug (bool or PipelineVariable): Optional.
Specifies whether RemoteDebug is enabled for the training job.
enable_session_tag_chaining (bool or PipelineVariable): Optional.
Specifies whether SessionTagChaining is enabled for the training job.
training_plan (str or PipelineVariable): Optional.
Specifies the ARN of the training plan to use for the training job.
"""
instance_count = renamed_kwargs(
"train_instance_count", "instance_count", instance_count, kwargs
)
instance_type = renamed_kwargs(
"train_instance_type", "instance_type", instance_type, kwargs
)
max_run = renamed_kwargs("train_max_run", "max_run", max_run, kwargs)
use_spot_instances = renamed_kwargs(
"train_use_spot_instances", "use_spot_instances", use_spot_instances, kwargs
)
max_wait = renamed_kwargs("train_max_wait", "max_wait", max_wait, kwargs)
volume_size = renamed_kwargs("train_volume_size", "volume_size", volume_size, kwargs)
volume_kms_key = renamed_kwargs(
"train_volume_kms_key", "volume_kms_key", volume_kms_key, kwargs
)
self.instance_count = instance_count
self.instance_type = instance_type
self.keep_alive_period_in_seconds = keep_alive_period_in_seconds
self.instance_groups = instance_groups
self.volume_size = volume_size
self.max_run = max_run
self.input_mode = input_mode
self.metric_definitions = metric_definitions
self.model_uri = model_uri
self.model_channel_name = model_channel_name
self.code_uri = None
self.code_channel_name = "code"
self.source_dir = source_dir
self.git_config = git_config
self.container_log_level = container_log_level
self._hyperparameters = hyperparameters.copy() if hyperparameters else {}
self.code_location = code_location
self.entry_point = entry_point
self.dependencies = dependencies or []
self.uploaded_code: Optional[UploadedCode] = None
# Check that the user properly sets both subnet and security_group_ids
if (
subnets is not None
and security_group_ids is None
or security_group_ids is not None
and subnets is None
):
troubleshooting = (
"Refer to this documentation on using custom VPC: "
"https://sagemaker.readthedocs.io/en/v2.24.0/overview.html"
"#secure-training-and-inference-with-vpc"
)
logger.error("Check troubleshooting guide for common errors: %s", troubleshooting)
raise RuntimeError(
"When setting up custom VPC, both subnets and security_group_ids must be set"
)
if self.instance_type in ("local", "local_gpu"):
if self.instance_type == "local_gpu" and self.instance_count > 1:
raise RuntimeError(
"Distributed Training in Local GPU is not supported."
" Set instance_count to 1."
)
self.sagemaker_session = sagemaker_session or LocalSession()
if not isinstance(self.sagemaker_session, sagemaker.local.LocalSession):
raise RuntimeError(
"instance_type local or local_gpu is only supported with an"
"instance of LocalSession. More details on local mode: "
"https://sagemaker.readthedocs.io/en/stable/overview.html#local-mode"
)
else:
self.sagemaker_session = sagemaker_session or Session()
tags = format_tags(tags)
self.tags = (
add_jumpstart_uri_tags(
tags=tags, training_model_uri=self.model_uri, training_script_uri=self.source_dir
)
if getattr(self.sagemaker_session, "settings", None) is not None
and self.sagemaker_session.settings.include_jumpstart_tags
else tags
)
self.base_job_name = base_job_name
self._current_job_name = None
if (
not self.sagemaker_session.local_mode
and output_path
and not is_pipeline_variable(output_path)
and output_path.startswith("file://")
):
raise RuntimeError(
"The 'file://' output paths are only supported when using Local Mode. "
"To resolve this issue, ensure you're running in Local Mode with a LocalSession, "
"or use an 's3://' output path for jobs running on SageMaker instances."
)
self.output_path = output_path
self.latest_training_job = None
self.jobs = []
self.deploy_instance_type = None
self._compiled_models = {}
self.role = resolve_value_from_config(
role, TRAINING_JOB_ROLE_ARN_PATH, sagemaker_session=self.sagemaker_session
)
if not self.role:
# Originally IAM role was a required parameter.
# Now we marked that as Optional because we can fetch it from SageMakerConfig
# Because of marking that parameter as optional, we should validate if it is None, even
# after fetching the config.
raise ValueError(
"An AWS IAM role is required to create an estimator. "
"Please provide a valid `role` argument with the ARN of an IAM role"
" that has the necessary SageMaker permissions."
)
self.output_kms_key = resolve_value_from_config(
output_kms_key, TRAINING_JOB_KMS_KEY_ID_PATH, sagemaker_session=self.sagemaker_session
)
use_volume_kms_config: bool = False
if instance_type is None or isinstance(instance_type, str):
instance_type_for_volume_kms = instance_type
elif isinstance(instance_type, PipelineVariable):
use_volume_kms_config = True
instance_type_for_volume_kms = instance_type
else:
raise ValueError(f"Bad value for instance type: '{instance_type}'")
# KMS can only be attached to supported instances
use_volume_kms_config = (
use_volume_kms_config
or (
instance_type_for_volume_kms and instance_supports_kms(instance_type_for_volume_kms)
)
or instance_groups is not None
and any(
[
instance_supports_kms(instance_group.instance_type)
for instance_group in instance_groups
]
)
)
self.volume_kms_key = (
resolve_value_from_config(
volume_kms_key,
TRAINING_JOB_VOLUME_KMS_KEY_ID_PATH,
sagemaker_session=self.sagemaker_session,
)
if use_volume_kms_config
else volume_kms_key
)
# VPC configurations
self.subnets = resolve_value_from_config(
subnets, TRAINING_JOB_SUBNETS_PATH, sagemaker_session=self.sagemaker_session
)
self.security_group_ids = resolve_value_from_config(
security_group_ids,
TRAINING_JOB_SECURITY_GROUP_IDS_PATH,
sagemaker_session=self.sagemaker_session,
)
# training image configs
self.training_repository_access_mode = training_repository_access_mode
self.training_repository_credentials_provider_arn = (
training_repository_credentials_provider_arn
)
self.enable_infra_check = enable_infra_check
# container entry point / arguments configs
self.container_entry_point = container_entry_point
self.container_arguments = container_arguments
self.encrypt_inter_container_traffic = resolve_value_from_config(
direct_input=encrypt_inter_container_traffic,
config_path=TRAINING_JOB_INTER_CONTAINER_ENCRYPTION_PATH,
default_value=False,
sagemaker_session=self.sagemaker_session,
)
self.use_spot_instances = use_spot_instances
self.max_wait = max_wait
self.checkpoint_s3_uri = checkpoint_s3_uri
self.checkpoint_local_path = checkpoint_local_path
self.rules = rules
# Today, we ONLY support debugger_hook_config to be provided as a boolean value
# from sagemaker_config. We resolve value for this parameter as per the order
# 1. value from direct_input which can be a boolean or a dictionary
# 2. value from sagemaker_config which can be a boolean
# In future, if we support debugger_hook_config to be provided as a dictionary
# from sagemaker_config [SageMaker.TrainingJob] then we will need to update the
# logic below to resolve the values as per the type of value received from
# direct_input and sagemaker_config
self.debugger_hook_config = resolve_value_from_config(
direct_input=debugger_hook_config,
config_path=ESTIMATOR_DEBUG_HOOK_CONFIG_PATH,
sagemaker_session=sagemaker_session,
)
# If customer passes True from either direct_input or sagemaker_config, we will
# create a default hook config as an empty dict which will later be populated
# with default s3_output_path from _prepare_debugger_for_training function
if self.debugger_hook_config is True:
self.debugger_hook_config = {}
self.tensorboard_output_config = tensorboard_output_config
self.debugger_rule_configs, self.collection_configs = None, None
self.enable_sagemaker_metrics = enable_sagemaker_metrics
self._enable_network_isolation = resolve_value_from_config(
direct_input=enable_network_isolation,
config_path=TRAINING_JOB_ENABLE_NETWORK_ISOLATION_PATH,
default_value=False,
sagemaker_session=self.sagemaker_session,
)
self.profiler_rule_configs, self.profiler_rules = None, None
self.profiler_config = profiler_config
self.disable_profiler = resolve_value_from_config(
direct_input=disable_profiler,
config_path=TRAINING_JOB_DISABLE_PROFILER_PATH,
default_value=False,
sagemaker_session=self.sagemaker_session,
)
self.environment = resolve_value_from_config(
direct_input=environment,
config_path=TRAINING_JOB_ENVIRONMENT_PATH,
default_value=None,
sagemaker_session=self.sagemaker_session,
)
self.max_retry_attempts = max_retry_attempts
if not _region_supports_profiler(
self.sagemaker_session.boto_region_name
) or _instance_type_supports_profiler(self.instance_type):
self.disable_profiler = True
self.debugger_rules = None
self.disable_output_compression = disable_output_compression
validate_source_code_input_against_pipeline_variables(
entry_point=entry_point,
source_dir=source_dir,
git_config=git_config,
enable_network_isolation=self._enable_network_isolation,
)
self.training_plan = training_plan
# Internal flag
self._is_output_path_set_from_default_bucket_and_prefix = False
self.tensorboard_app = TensorBoardApp(region=self.sagemaker_session.boto_region_name)
self._enable_remote_debug = enable_remote_debug
self._enable_session_tag_chaining = enable_session_tag_chaining
@abstractmethod
def training_image_uri(self):
"""Return the Docker image to use for training.
The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which does
the model training, calls this method to find the image to use for model
training.
Returns:
str: The URI of the Docker image.
"""
@abstractmethod
def hyperparameters(self):
"""Return the hyperparameters as a dictionary to use for training.
The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which
trains the model, calls this method to find the hyperparameters.
Returns:
dict[str, str]: The hyperparameters.
"""
def enable_network_isolation(self):
"""Return True if this Estimator will need network isolation to run.
Returns:
bool: Whether this Estimator needs network isolation or not.
"""
return self._enable_network_isolation
def prepare_workflow_for_training(self, job_name=None):
"""Calls _prepare_for_training. Used when setting up a workflow.
Args:
job_name (str): Name of the training job to be created. If not
specified, one is generated, using the base name given to the
constructor if applicable.
"""
self._prepare_for_training(job_name=job_name)
def _ensure_base_job_name(self):
"""Set ``self.base_job_name`` if it is not set already."""
# honor supplied base_job_name or generate it
self.base_job_name = (
self.base_job_name
or get_jumpstart_base_name_if_jumpstart_model(self.source_dir, self.model_uri)
or base_name_from_image(
self.training_image_uri(), default_base_name=EstimatorBase.JOB_CLASS_NAME
)
)
def _get_or_create_name(self, name=None):
"""Generate a name based on the base job name or training image if needed.
Args:
name (str): User-supplied name. If not specified, a name is generated from
the base job name or training image.
Returns:
str: Either the user-supplied name or a generated name.
"""
if name:
return name
self._ensure_base_job_name()
return name_from_base(self.base_job_name)
@staticmethod
def _json_encode_hyperparameters(hyperparameters: Dict[str, Any]) -> Dict[str, Any]:
"""Applies JSON encoding for certain hyperparameter types, returns hyperparameters.
Args:
hyperparameters (dict): Dictionary of hyperparameters.
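For example (illustrative values), ``{"epochs": 10, "optimizer": "sgd"}``
is encoded as ``{"epochs": "10", "optimizer": '"sgd"'}``; pipeline
variables are converted with ``to_string()`` instead of ``json.dumps``.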
"""
current_hyperparameters = hyperparameters
if current_hyperparameters is not None:
hyperparameters = {
str(k): (v.to_string() if is_pipeline_variable(v) else json.dumps(v))
for (k, v) in current_hyperparameters.items()
}
return hyperparameters
def _prepare_for_training(self, job_name=None):
"""Set any values in the estimator that need to be set before training.
Args:
job_name (str): Name of the training job to be created. If not
specified, one is generated, using the base name given to the
constructor if applicable.
"""
self._current_job_name = self._get_or_create_name(job_name)
# if output_path was specified we use it otherwise initialize here.
# For Local Mode with local_code=True we don't need an explicit output_path
if self.output_path is None:
local_code = get_config_value("local.local_code", self.sagemaker_session.config)
if self.sagemaker_session.local_mode and local_code:
self.output_path = ""
else:
self.output_path = s3.s3_path_join(
"s3://",
self.sagemaker_session.default_bucket(),
self.sagemaker_session.default_bucket_prefix,
with_end_slash=True,
)
self._is_output_path_set_from_default_bucket_and_prefix = True
if self.git_config:
updated_paths = git_utils.git_clone_repo(
self.git_config, self.entry_point, self.source_dir, self.dependencies
)
self.entry_point = updated_paths["entry_point"]
self.source_dir = updated_paths["source_dir"]
self.dependencies = updated_paths["dependencies"]
if self.source_dir or self.entry_point or self.dependencies:
# validate source dir will raise a ValueError if there is something wrong with
# the source directory. We are intentionally not handling it because this is a
# critical error.
if (
self.source_dir
and not is_pipeline_variable(self.source_dir)
and not self.source_dir.lower().startswith("s3://")
):
validate_source_dir(self.entry_point, self.source_dir)
# if we are in local mode with local_code=True. We want the container to just
# mount the source dir instead of uploading to S3.
local_code = get_config_value("local.local_code", self.sagemaker_session.config)
if self.sagemaker_session.local_mode and local_code:
# if there is no source dir, use the directory containing the entry point.
if self.source_dir is None:
self.source_dir = os.path.dirname(self.entry_point)
self.entry_point = os.path.basename(self.entry_point)
code_dir = "file://" + self.source_dir
script = self.entry_point
elif self.enable_network_isolation() and self.entry_point:
self.uploaded_code = self._stage_user_code_in_s3()
code_dir = self.CONTAINER_CODE_CHANNEL_SOURCEDIR_PATH
script = self.uploaded_code.script_name
self.code_uri = self.uploaded_code.s3_prefix
else:
self.uploaded_code = self._stage_user_code_in_s3()
code_dir = self.uploaded_code.s3_prefix
script = self.uploaded_code.script_name
# Modify hyperparameters in-place to point to the right code directory and
# script URIs
self._script_mode_hyperparam_update(code_dir, script)
self._prepare_rules()
self._prepare_debugger_for_training()
self._prepare_profiler_for_training()
def _script_mode_hyperparam_update(self, code_dir: str, script: str) -> None:
"""Applies in-place updates to hyperparameters required for script mode with training.
Args:
code_dir (str): The directory hosting the training scripts.
script (str): The relative filepath of the training entry-point script.
"""
hyperparams: Dict[str, str] = {}
hyperparams[DIR_PARAM_NAME] = code_dir
hyperparams[SCRIPT_PARAM_NAME] = script
hyperparams[CONTAINER_LOG_LEVEL_PARAM_NAME] = self.container_log_level
hyperparams[JOB_NAME_PARAM_NAME] = self._current_job_name
hyperparams[SAGEMAKER_REGION_PARAM_NAME] = self.sagemaker_session.boto_region_name
self._hyperparameters.update(EstimatorBase._json_encode_hyperparameters(hyperparams))
def _stage_user_code_in_s3(self) -> UploadedCode:
"""Uploads the user training script to S3 and returns the S3 URI.
Returns: S3 URI
"""
if is_pipeline_variable(self.output_path):