
Commit 81b6be3

Daily Sync with Botocore v1.40.59 on 2025/10/27 (#344)
1 parent 46ee71c commit 81b6be3


4 files changed: +85 -4 lines changed


sample/sagemaker/2017-07-24/service-2.json

Lines changed: 35 additions & 2 deletions
@@ -12446,7 +12446,7 @@
       },
       "PlatformIdentifier":{
         "shape":"PlatformIdentifier",
-        "documentation":"<p>The platform identifier of the notebook instance runtime environment.</p>"
+        "documentation":"<p>The platform identifier of the notebook instance runtime environment. The default value is <code>notebook-al2-v2</code>.</p>"
       },
       "InstanceMetadataServiceConfiguration":{
         "shape":"InstanceMetadataServiceConfiguration",
@@ -21172,6 +21172,7 @@
       },
       "documentation":"<p>The configuration parameters that specify the IAM roles assumed by the execution role of SageMaker (assumable roles) and the cluster instances or job execution environments (execution roles or runtime roles) to manage and access resources required for running Amazon EMR clusters or Amazon EMR Serverless applications.</p>"
     },
+    "EnableCaching":{"type":"boolean"},
     "EnableCapture":{"type":"boolean"},
     "EnableInfraCheck":{
       "type":"boolean",
@@ -24706,6 +24707,30 @@
       "box":true,
       "min":0
     },
+    "InferenceComponentDataCacheConfig":{
+      "type":"structure",
+      "required":["EnableCaching"],
+      "members":{
+        "EnableCaching":{
+          "shape":"EnableCaching",
+          "documentation":"<p>Sets whether the endpoint that hosts the inference component caches the model artifacts and container image.</p> <p>With caching enabled, the endpoint caches this data in each instance that it provisions for the inference component. That way, the inference component deploys faster during the auto scaling process. If caching isn't enabled, the inference component takes longer to deploy because of the time it spends downloading the data.</p>",
+          "box":true
+        }
+      },
+      "documentation":"<p>Settings that affect how the inference component caches data.</p>"
+    },
+    "InferenceComponentDataCacheConfigSummary":{
+      "type":"structure",
+      "required":["EnableCaching"],
+      "members":{
+        "EnableCaching":{
+          "shape":"EnableCaching",
+          "documentation":"<p>Indicates whether the inference component caches model artifacts as part of the auto scaling process.</p>",
+          "box":true
+        }
+      },
+      "documentation":"<p>Settings that affect how the inference component caches data.</p>"
+    },
     "InferenceComponentDeploymentConfig":{
       "type":"structure",
       "required":["RollingUpdatePolicy"],
@@ -24811,6 +24836,10 @@
       "BaseInferenceComponentName":{
         "shape":"InferenceComponentName",
         "documentation":"<p>The name of an existing inference component that is to contain the inference component that you're creating with your request.</p> <p>Specify this parameter only if your request is meant to create an adapter inference component. An adapter inference component contains the path to an adapter model. The purpose of the adapter model is to tailor the inference output of a base foundation model, which is hosted by the base inference component. The adapter inference component uses the compute resources that you assigned to the base inference component.</p> <p>When you create an adapter inference component, use the <code>Container</code> parameter to specify the location of the adapter artifacts. In the parameter value, use the <code>ArtifactUrl</code> parameter of the <code>InferenceComponentContainerSpecification</code> data type.</p> <p>Before you can create an adapter inference component, you must have an existing inference component that contains the foundation model that you want to adapt.</p>"
+      },
+      "DataCacheConfig":{
+        "shape":"InferenceComponentDataCacheConfig",
+        "documentation":"<p>Settings that affect how the inference component caches data.</p>"
       }
     },
     "documentation":"<p>Details about the resources to deploy with this inference component, including the model, container, and compute resources.</p>"
@@ -24837,6 +24866,10 @@
       "BaseInferenceComponentName":{
         "shape":"InferenceComponentName",
         "documentation":"<p>The name of the base inference component that contains this inference component.</p>"
+      },
+      "DataCacheConfig":{
+        "shape":"InferenceComponentDataCacheConfigSummary",
+        "documentation":"<p>Settings that affect how the inference component caches data.</p>"
       }
     },
     "documentation":"<p>Details about the resources that are deployed with this inference component.</p>"
@@ -36136,7 +36169,7 @@
       },
       "S3DataDistributionType":{
         "shape":"ProcessingS3DataDistributionType",
-        "documentation":"<p>Whether to distribute the data from Amazon S3 to all processing instances with <code>FullyReplicated</code>, or whether the data from Amazon S3 is shared by Amazon S3 key, downloading one shard of data to each processing instance.</p>"
+        "documentation":"<p>Whether to distribute the data from Amazon S3 to all processing instances with <code>FullyReplicated</code>, or whether the data from Amazon S3 is sharded by Amazon S3 key, downloading one shard of data to each processing instance.</p>"
       },
       "S3CompressionType":{
         "shape":"ProcessingS3CompressionType",

src/sagemaker_core/main/code_injection/shape_dag.py

Lines changed: 18 additions & 0 deletions
@@ -8365,6 +8365,14 @@
         ],
         "type": "structure",
     },
+    "InferenceComponentDataCacheConfig": {
+        "members": [{"name": "EnableCaching", "shape": "EnableCaching", "type": "boolean"}],
+        "type": "structure",
+    },
+    "InferenceComponentDataCacheConfigSummary": {
+        "members": [{"name": "EnableCaching", "shape": "EnableCaching", "type": "boolean"}],
+        "type": "structure",
+    },
     "InferenceComponentDeploymentConfig": {
         "members": [
             {
@@ -8437,6 +8445,11 @@
                 "shape": "InferenceComponentName",
                 "type": "string",
             },
+            {
+                "name": "DataCacheConfig",
+                "shape": "InferenceComponentDataCacheConfig",
+                "type": "structure",
+            },
         ],
         "type": "structure",
     },
@@ -8463,6 +8476,11 @@
                 "shape": "InferenceComponentName",
                 "type": "string",
             },
+            {
+                "name": "DataCacheConfig",
+                "shape": "InferenceComponentDataCacheConfigSummary",
+                "type": "structure",
+            },
         ],
         "type": "structure",
     },
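
These DAG entries drive response deserialization in the code_injection utilities. As an illustration only (not the repo's actual traversal logic), a hypothetical walker that uses an entry of this form to filter a raw API response down to its declared members:

SHAPE_DAG = {
    "InferenceComponentDataCacheConfig": {
        "members": [{"name": "EnableCaching", "shape": "EnableCaching", "type": "boolean"}],
        "type": "structure",
    },
}

def extract_members(shape_name: str, raw: dict) -> dict:
    # Keep only the members that the shape DAG declares for this structure.
    entry = SHAPE_DAG[shape_name]
    assert entry["type"] == "structure"
    return {m["name"]: raw[m["name"]] for m in entry["members"] if m["name"] in raw}

print(extract_members("InferenceComponentDataCacheConfig", {"EnableCaching": True, "Extra": 1}))
# -> {'EnableCaching': True}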

src/sagemaker_core/main/resources.py

Lines changed: 1 addition & 1 deletion
@@ -22677,7 +22677,7 @@ def create(
             default_code_repository: A Git repository to associate with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in Amazon Web Services CodeCommit or in any other Git repository. When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances.
             additional_code_repositories: An array of up to three Git repositories to associate with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in Amazon Web Services CodeCommit or in any other Git repository. These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git Repositories with SageMaker AI Notebook Instances.
             root_access: Whether root access is enabled or disabled for users of the notebook instance. The default value is Enabled. Lifecycle configurations need root access to be able to set up a notebook instance. Because of this, lifecycle configurations associated with a notebook instance always run with root access even if you disable root access for users.
-            platform_identifier: The platform identifier of the notebook instance runtime environment.
+            platform_identifier: The platform identifier of the notebook instance runtime environment. The default value is notebook-al2-v2.
             instance_metadata_service_configuration: Information on the IMDS configuration of the notebook instance
             session: Boto3 session.
             region: Region name.
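
For reference, a rough sketch of how this parameter is passed when creating the resource; the argument list is abridged, and the notebook name and role ARN are hypothetical placeholders:

from sagemaker_core.main.resources import NotebookInstance

notebook = NotebookInstance.create(
    notebook_instance_name="my-notebook",                     # hypothetical
    instance_type="ml.t3.medium",
    role_arn="arn:aws:iam::123456789012:role/SageMakerRole",  # hypothetical
    # Per the updated docstring, omitting this defaults to notebook-al2-v2.
    platform_identifier="notebook-al2-v2",
)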

src/sagemaker_core/main/shapes.py

Lines changed: 31 additions & 1 deletion
@@ -6377,6 +6377,19 @@ class InferenceComponentComputeResourceRequirements(Base):
     max_memory_required_in_mb: Optional[int] = Unassigned()
 
 
+class InferenceComponentDataCacheConfig(Base):
+    """
+    InferenceComponentDataCacheConfig
+      Settings that affect how the inference component caches data.
+
+    Attributes
+    ----------------------
+    enable_caching: Sets whether the endpoint that hosts the inference component caches the model artifacts and container image. With caching enabled, the endpoint caches this data in each instance that it provisions for the inference component. That way, the inference component deploys faster during the auto scaling process. If caching isn't enabled, the inference component takes longer to deploy because of the time it spends downloading the data.
+    """
+
+    enable_caching: bool
+
+
 class InferenceComponentSpecification(Base):
     """
     InferenceComponentSpecification
@@ -6389,6 +6402,7 @@ class InferenceComponentSpecification(Base):
     startup_parameters: Settings that take effect while the model container starts up.
     compute_resource_requirements: The compute resources allocated to run the model, plus any adapter models, that you assign to the inference component. Omit this parameter if your request is meant to create an adapter inference component. An adapter inference component is loaded by a base inference component, and it uses the compute resources of the base inference component.
     base_inference_component_name: The name of an existing inference component that is to contain the inference component that you're creating with your request. Specify this parameter only if your request is meant to create an adapter inference component. An adapter inference component contains the path to an adapter model. The purpose of the adapter model is to tailor the inference output of a base foundation model, which is hosted by the base inference component. The adapter inference component uses the compute resources that you assigned to the base inference component. When you create an adapter inference component, use the Container parameter to specify the location of the adapter artifacts. In the parameter value, use the ArtifactUrl parameter of the InferenceComponentContainerSpecification data type. Before you can create an adapter inference component, you must have an existing inference component that contains the foundation model that you want to adapt.
+    data_cache_config: Settings that affect how the inference component caches data.
     """
 
     model_name: Optional[Union[str, object]] = Unassigned()
@@ -6398,6 +6412,7 @@ class InferenceComponentSpecification(Base):
         Unassigned()
     )
     base_inference_component_name: Optional[str] = Unassigned()
+    data_cache_config: Optional[InferenceComponentDataCacheConfig] = Unassigned()
 
 
 class InferenceComponentRuntimeConfig(Base):
@@ -7803,7 +7818,7 @@ class ProcessingS3Input(Base):
     local_path: The local path in your container where you want Amazon SageMaker to write input data to. LocalPath is an absolute path to the input data and must begin with /opt/ml/processing/. LocalPath is a required parameter when AppManaged is False (default).
     s3_data_type: Whether you use an S3Prefix or a ManifestFile for the data type. If you choose S3Prefix, S3Uri identifies a key name prefix. Amazon SageMaker uses all objects with the specified key name prefix for the processing job. If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for the processing job.
     s3_input_mode: Whether to use File or Pipe input mode. In File mode, Amazon SageMaker copies the data from the input source onto the local ML storage volume before starting your processing container. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your processing container into named pipes without using the ML storage volume.
-    s3_data_distribution_type: Whether to distribute the data from Amazon S3 to all processing instances with FullyReplicated, or whether the data from Amazon S3 is shared by Amazon S3 key, downloading one shard of data to each processing instance.
+    s3_data_distribution_type: Whether to distribute the data from Amazon S3 to all processing instances with FullyReplicated, or whether the data from Amazon S3 is sharded by Amazon S3 key, downloading one shard of data to each processing instance.
     s3_compression_type: Whether to GZIP-decompress the data in Amazon S3 as it is streamed into the processing container. Gzip can only be used when Pipe mode is specified as the S3InputMode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your container without using the EBS volume.
     """
 
@@ -9284,6 +9299,19 @@ class InferenceComponentContainerSpecificationSummary(Base):
     environment: Optional[Dict[str, str]] = Unassigned()
 
 
+class InferenceComponentDataCacheConfigSummary(Base):
+    """
+    InferenceComponentDataCacheConfigSummary
+      Settings that affect how the inference component caches data.
+
+    Attributes
+    ----------------------
+    enable_caching: Indicates whether the inference component caches model artifacts as part of the auto scaling process.
+    """
+
+    enable_caching: bool
+
+
 class InferenceComponentSpecificationSummary(Base):
     """
     InferenceComponentSpecificationSummary
@@ -9296,6 +9324,7 @@ class InferenceComponentSpecificationSummary(Base):
     startup_parameters: Settings that take effect while the model container starts up.
     compute_resource_requirements: The compute resources allocated to run the model, plus any adapter models, that you assign to the inference component.
     base_inference_component_name: The name of the base inference component that contains this inference component.
+    data_cache_config: Settings that affect how the inference component caches data.
     """
 
     model_name: Optional[Union[str, object]] = Unassigned()
@@ -9305,6 +9334,7 @@ class InferenceComponentSpecificationSummary(Base):
         Unassigned()
     )
     base_inference_component_name: Optional[str] = Unassigned()
+    data_cache_config: Optional[InferenceComponentDataCacheConfigSummary] = Unassigned()
 
 
 class InferenceComponentRuntimeConfigSummary(Base):
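
Putting the new shapes together, a short sketch of attaching the cache config when building a specification with these classes; the model name is a hypothetical placeholder:

from sagemaker_core.main.shapes import (
    InferenceComponentDataCacheConfig,
    InferenceComponentSpecification,
)

# Enable artifact and image caching so scaled-out copies deploy faster.
spec = InferenceComponentSpecification(
    model_name="my-model",  # hypothetical
    data_cache_config=InferenceComponentDataCacheConfig(enable_caching=True),
)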
