diff --git a/README.rst b/README.rst index 732d0d0..99dedd1 100644 --- a/README.rst +++ b/README.rst @@ -17,6 +17,19 @@ Handles multi-API versions of Azure Storage Data Plane originally from https://g Change Log ---------- +0.7.0 ++++++ +* blob: + - Support v2020-06-12(12.8.1) + - Support v2020-10-02(12.9.0) +* fileshare: + - Minor fix for 2020-04-08(12.5.0) + - Support v2020-10-02(12.6.0) +* filedatalake: + - Minor fix for 2020-02-10(12.3.1) + - Support v2020-06-12(12.5.0) +* queue: Minor fix for 2018-03-28(12.1.6) + 0.6.2 +++++ * Fix import issue for filedatalake diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/__init__.py new file mode 100644 index 0000000..9164961 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/__init__.py @@ -0,0 +1,233 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import os + +from typing import Union, Iterable, AnyStr, IO, Any, Dict # pylint: disable=unused-import +from ._version import VERSION +from ._blob_client import BlobClient +from ._container_client import ContainerClient +from ._blob_service_client import BlobServiceClient +from ._lease import BlobLeaseClient +from ._download import StorageStreamDownloader +from ._quick_query_helper import BlobQueryReader +from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas +from ._shared.policies import ExponentialRetry, LinearRetry +from ._shared.response_handlers import PartialBatchErrorException +from ._shared.models import ( + LocationMode, + ResourceTypes, + AccountSasPermissions, + StorageErrorCode, + UserDelegationKey +) +from ._generated.models import ( + RehydratePriority +) +from ._models import ( + BlobType, + BlockState, + StandardBlobTier, + PremiumPageBlobTier, + SequenceNumberAction, + PublicAccess, + BlobAnalyticsLogging, + Metrics, + RetentionPolicy, + StaticWebsite, + CorsRule, + ContainerProperties, + BlobProperties, + FilteredBlob, + LeaseProperties, + ContentSettings, + CopyProperties, + BlobBlock, + PageRange, + AccessPolicy, + ContainerSasPermissions, + BlobSasPermissions, + CustomerProvidedEncryptionKey, + ContainerEncryptionScope, + BlobQueryError, + DelimitedJsonDialect, + DelimitedTextDialect, + ArrowDialect, + ArrowType, + ObjectReplicationPolicy, + ObjectReplicationRule +) +from ._list_blobs_helper import BlobPrefix + +__version__ = VERSION + + +def upload_blob_to_url( + blob_url, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + credential=None, # type: Any + **kwargs): + # type: (...) -> Dict[str, Any] + """Upload data to a given URL. + + The data will be uploaded as a block blob. + + :param str blob_url: + The full URI to the blob. This can also include a SAS token. + :param data: + The data to upload. This can be bytes, text, an iterable or a file-like object. + :type data: bytes or str or Iterable + :param credential: + The credentials with which to authenticate. This is optional if the + blob URL already has a SAS token. The value can be a SAS token string, + an instance of an AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword bool overwrite: + Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob_to_url will overwrite any existing data. If set to False, the + operation will fail with a ResourceExistsError. + :keyword int max_concurrency: + The number of parallel connections with which to upload. + :keyword int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword dict(str,str) metadata: + Name-value pairs associated with the blob as metadata. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword str encoding: + Encoding to use if text is supplied as input. Defaults to UTF-8. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict(str, Any) + """ + with BlobClient.from_blob_url(blob_url, credential=credential) as client: + return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) + + +def _download_to_stream(client, handle, **kwargs): + """Download data to specified open file-handle.""" + stream = client.download_blob(**kwargs) + stream.readinto(handle) + + +def download_blob_from_url( + blob_url, # type: str + output, # type: str + credential=None, # type: Any + **kwargs): + # type: (...) -> None + """Download the contents of a blob to a local file or stream. + + :param str blob_url: + The full URI to the blob. This can also include a SAS token. + :param output: + Where the data should be downloaded to. This could be either a file path to write to, + or an open IO handle to write to. + :type output: str or writable stream. + :param credential: + The credentials with which to authenticate. This is optional if the + blob URL already has a SAS token or the blob is public. The value can be a SAS token string, + an instance of an AzureSasCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword bool overwrite: + Whether the local file should be overwritten if it already exists. The default value is + `False` - in which case a ValueError will be raised if the file already exists. If set to + `True`, an attempt will be made to write to the existing file. If a stream handle is passed + in, this value is ignored. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided.
+ :keyword int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :rtype: None + """ + overwrite = kwargs.pop('overwrite', False) + with BlobClient.from_blob_url(blob_url, credential=credential) as client: + if hasattr(output, 'write'): + _download_to_stream(client, output, **kwargs) + else: + if not overwrite and os.path.isfile(output): + raise ValueError("The file '{}' already exists.".format(output)) + with open(output, 'wb') as file_handle: + _download_to_stream(client, file_handle, **kwargs) + + +__all__ = [ + 'upload_blob_to_url', + 'download_blob_from_url', + 'BlobServiceClient', + 'ContainerClient', + 'BlobClient', + 'BlobType', + 'BlobLeaseClient', + 'StorageErrorCode', + 'UserDelegationKey', + 'ExponentialRetry', + 'LinearRetry', + 'LocationMode', + 'BlockState', + 'StandardBlobTier', + 'PremiumPageBlobTier', + 'SequenceNumberAction', + 'PublicAccess', + 'BlobAnalyticsLogging', + 'Metrics', + 'RetentionPolicy', + 'StaticWebsite', + 'CorsRule', + 'ContainerProperties', + 'BlobProperties', + 'BlobPrefix', + 'FilteredBlob', + 'LeaseProperties', + 'ContentSettings', + 'CopyProperties', + 'BlobBlock', + 'PageRange', + 'AccessPolicy', + 'ContainerSasPermissions', + 'BlobSasPermissions', + 'ResourceTypes', + 'AccountSasPermissions', + 'StorageStreamDownloader', + 'CustomerProvidedEncryptionKey', + 'RehydratePriority', + 'generate_account_sas', + 'generate_container_sas', + 'generate_blob_sas', + 'PartialBatchErrorException', + 'ContainerEncryptionScope', + 'BlobQueryError', + 'DelimitedJsonDialect', + 'DelimitedTextDialect', + 'ArrowDialect', + 'ArrowType', + 'BlobQueryReader', + 'ObjectReplicationPolicy', + 'ObjectReplicationRule' +] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_blob_client.py b/azure/multiapi/storagev2/blob/v2020_06_12/_blob_client.py new file mode 100644 index 0000000..e82c04f --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_blob_client.py @@ -0,0 +1,3788 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information.
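For orientation, a minimal usage sketch of the two module-level helpers defined above, assuming the vendored import path from this diff; the SAS URL and local file name are placeholders:

    from azure.multiapi.storagev2.blob.v2020_06_12 import (
        upload_blob_to_url,
        download_blob_from_url,
    )

    # The URL already carries a SAS token, so no explicit credential is passed.
    sas_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas-token>"

    # Uploads the bytes as a block blob; overwrite=True replaces existing data.
    upload_blob_to_url(sas_url, data=b"hello, world", overwrite=True)

    # Writes the blob to a local file; without overwrite=True a ValueError is
    # raised if the file already exists.
    download_blob_from_url(sas_url, output="myblob.bin", overwrite=True)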
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines,no-self-use +from functools import partial +from io import BytesIO +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, + TYPE_CHECKING +) + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six +from azure.core.pipeline import Pipeline +from azure.core.tracing.decorator import distributed_trace +from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError + +from ._shared import encode_base64 +from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query, TransportWrapper +from ._shared.encryption import generate_blob_encryption_data +from ._shared.uploads import IterStreamer +from ._shared.request_handlers import ( + add_metadata_headers, get_length, read_length, + validate_and_format_range_headers) +from ._shared.response_handlers import return_response_headers, process_storage_error, return_headers_and_deserialized +from ._generated import AzureBlobStorage +from ._generated.models import ( # pylint: disable=unused-import + DeleteSnapshotsOptionType, + BlobHTTPHeaders, + BlockLookupList, + AppendPositionAccessConditions, + SequenceNumberAccessConditions, + QueryRequest, + CpkInfo) +from ._serialize import ( + get_modify_conditions, + get_source_conditions, + get_cpk_scope_info, + get_api_version, + serialize_blob_tags_header, + serialize_blob_tags, + serialize_query_format, get_access_conditions +) +from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream, parse_tags, \ + deserialize_pipeline_response_into_cls +from ._quick_query_helper import BlobQueryReader +from ._upload_helpers import ( + upload_block_blob, + upload_append_blob, + upload_page_blob, _any_conditions) +from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError +from ._download import StorageStreamDownloader +from ._lease import BlobLeaseClient + +if TYPE_CHECKING: + from datetime import datetime + from ._generated.models import BlockList + from ._models import ( # pylint: disable=unused-import + ContentSettings, + PremiumPageBlobTier, + StandardBlobTier, + SequenceNumberAction + ) + +_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( + 'The require_encryption flag is set, but encryption is not supported' + ' for this method.') + + +class BlobClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods + """A client to interact with a specific blob, although that blob may not yet exist. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the blob, + use the :func:`from_blob_url` classmethod. + :param container_name: The container name for the blob. + :type container_name: str + :param blob_name: The name of the blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob_name: str + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. 
The value can be a SAS token string, + an instance of an AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_blob_client] + :end-before: [END create_blob_client] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a URL to a public blob (no auth needed). + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_blob_client_sas_url] + :end-before: [END create_blob_client_sas_url] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a SAS URL to a blob. + """ + def __init__( + self, account_url, # type: str + container_name, # type: str + blob_name, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + + if not (container_name and blob_name): + raise ValueError("Please specify a container name and blob name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + path_snapshot, sas_token = parse_query(parsed_url.query) + + self.container_name = container_name + self.blob_name = blob_name + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + # This parameter is used for the hierarchy traversal. Give precedence to credential.
+ self._raw_credential = credential if credential else sas_token + self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot) + super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) + self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) + default_api_version = self._client._config.version # pylint: disable=protected-access + self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access + + def _format_url(self, hostname): + container_name = self.container_name + if isinstance(container_name, six.text_type): + container_name = container_name.encode('UTF-8') + return "{}://{}/{}/{}{}".format( + self.scheme, + hostname, + quote(container_name), + quote(self.blob_name, safe='~/'), + self._query_str) + + def _encode_source_url(self, source_url): + parsed_source_url = urlparse(source_url) + source_scheme = parsed_source_url.scheme + source_hostname = parsed_source_url.netloc.rstrip('/') + source_path = unquote(parsed_source_url.path) + source_query = parsed_source_url.query + result = ["{}://{}{}".format(source_scheme, source_hostname, quote(source_path, safe='~/'))] + if source_query: + result.append(source_query) + return '?'.join(result) + + @classmethod + def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs): + # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient + """Create BlobClient from a blob URL. This doesn't support customized blob URLs with '/' in the blob name. + + :param str blob_url: + The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be + either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. + :type blob_url: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. The value can be a SAS token string, + an instance of an AzureSasCredential from azure.core.credentials, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. If specified, this will override + the snapshot in the url. + :returns: A Blob client. + :rtype: ~azure.storage.blob.BlobClient + """ + try: + if not blob_url.lower().startswith('http'): + blob_url = "https://" + blob_url + except AttributeError: + raise ValueError("Blob URL must be a string.") + parsed_url = urlparse(blob_url.rstrip('/')) + + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(blob_url)) + + account_path = "" + if ".core." in parsed_url.netloc: + # .core. indicates a non-customized URL. A blob name with directory info can also be parsed. + path_blob = parsed_url.path.lstrip('/').split('/', 1) + elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc: + path_blob = parsed_url.path.lstrip('/').split('/', 2) + account_path += '/' + path_blob[0] + else: + # for customized URLs, a blob name that has directory info cannot be parsed.
+ path_blob = parsed_url.path.lstrip('/').split('/') + if len(path_blob) > 2: + account_path = "/" + "/".join(path_blob[:-2]) + account_url = "{}://{}{}?{}".format( + parsed_url.scheme, + parsed_url.netloc.rstrip('/'), + account_path, + parsed_url.query) + container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1]) + if not container_name or not blob_name: + raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.") + + path_snapshot, _ = parse_query(parsed_url.query) + if snapshot: + try: + path_snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + path_snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + path_snapshot = snapshot + + return cls( + account_url, container_name=container_name, blob_name=blob_name, + snapshot=path_snapshot, credential=credential, **kwargs + ) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + container_name, # type: str + blob_name, # type: str + snapshot=None, # type: Optional[str] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): # type: (...) -> BlobClient + """Create BlobClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param container_name: The container name for the blob. + :type container_name: str + :param blob_name: The name of the blob with which to interact. + :type blob_name: str + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. The value can be a SAS token string, + an instance of an AzureSasCredential from azure.core.credentials, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. + Credentials provided here will take precedence over those in the connection string. + :returns: A Blob client. + :rtype: ~azure.storage.blob.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START auth_from_connection_string_blob] + :end-before: [END auth_from_connection_string_blob] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, container_name=container_name, blob_name=blob_name, + snapshot=snapshot, credential=credential, **kwargs + ) + + @distributed_trace + def get_account_information(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """Gets information related to the storage account in which the blob resides. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type).
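A compact sketch of the three construction routes documented above; the account URL, credential, and connection string are placeholders:

    from azure.multiapi.storagev2.blob.v2020_06_12 import BlobClient

    # 1. Explicit account URL plus container and blob names.
    client = BlobClient(
        "https://myaccount.blob.core.windows.net",
        container_name="mycontainer",
        blob_name="myblob",
        credential="<account-key-or-sas-token>",
    )

    # 2. A full blob URL; a SAS token embedded in the URL is picked up.
    client = BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas-token>")

    # 3. A connection string; credentials in it are parsed automatically.
    client = BlobClient.from_connection_string(
        "<connection-string>", container_name="mycontainer", blob_name="myblob")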
+ :rtype: dict(str, str) + """ + try: + return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _upload_blob_options( # pylint:disable=too-many-statements + self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] + length=None, # type: Optional[int] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + encryption_options = { + 'required': self.require_encryption, + 'key': self.key_encryption_key, + 'resolver': self.key_resolver_function, + } + if self.key_encryption_key is not None: + cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key) + encryption_options['cek'] = cek + encryption_options['vector'] = iv + encryption_options['data'] = encryption_data + + encoding = kwargs.pop('encoding', 'UTF-8') + if isinstance(data, six.text_type): + data = data.encode(encoding) # type: ignore + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, 'read'): + stream = data + elif hasattr(data, '__iter__'): + stream = IterStreamer(data, encoding=encoding) + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + + validate_content = kwargs.pop('validate_content', False) + content_settings = kwargs.pop('content_settings', None) + overwrite = kwargs.pop('overwrite', False) + max_concurrency = kwargs.pop('max_concurrency', 1) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + kwargs['cpk_info'] = cpk_info + + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) + kwargs['modified_access_conditions'] = get_modify_conditions(kwargs) + kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs) + if content_settings: + kwargs['blob_headers'] = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None)) + kwargs['stream'] = stream + kwargs['length'] = length + kwargs['overwrite'] = overwrite + kwargs['headers'] = headers + kwargs['validate_content'] = validate_content + kwargs['blob_settings'] = self._config + kwargs['max_concurrency'] = max_concurrency + kwargs['encryption_options'] = encryption_options + if blob_type == BlobType.BlockBlob: + kwargs['client'] = self._client.block_blob + kwargs['data'] = data + elif blob_type == BlobType.PageBlob: + kwargs['client'] = self._client.page_blob + elif blob_type == BlobType.AppendBlob: + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + 
kwargs['client'] = self._client.append_blob + else: + raise ValueError("Unsupported BlobType: {}".format(blob_type)) + return kwargs + + def _upload_blob_from_url_options(self, source_url, **kwargs): + # type: (...) -> Dict[str, Any] + tier = kwargs.pop('standard_blob_tier', None) + overwrite = kwargs.pop('overwrite', False) + content_settings = kwargs.pop('content_settings', None) + if content_settings: + kwargs['blob_http_headers'] = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=None, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'content_length': 0, + 'copy_source_blob_properties': kwargs.pop('include_source_blob_properties', True), + 'source_content_md5': kwargs.pop('source_content_md5', None), + 'copy_source': source_url, + 'modified_access_conditions': get_modify_conditions(kwargs), + 'blob_tags_string': serialize_blob_tags_header(kwargs.pop('tags', None)), + 'cls': return_response_headers, + 'lease_access_conditions': get_access_conditions(kwargs.pop('destination_lease', None)), + 'tier': tier.value if tier else None, + 'source_modified_access_conditions': get_source_conditions(kwargs), + 'cpk_info': cpk_info, + 'cpk_scope_info': get_cpk_scope_info(kwargs) + } + options.update(kwargs) + if not overwrite and not _any_conditions(**options): # pylint: disable=protected-access + options['modified_access_conditions'].if_none_match = '*' + return options + + @distributed_trace + def upload_blob_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Dict[str, Any] + """ + Creates a new Block Blob where the content of the blob is read from a given URL. + The content of an existing blob is overwritten with the new blob. + + :param str source_url: + A URL of up to 2 KB in length that specifies a file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob_from_url will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. + :keyword bool include_source_blob_properties: + Indicates if properties from the source blob should be copied. Defaults to True. + :keyword tags: + Name-value pairs associated with the blob as tags. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + :paramtype tags: dict(str, str) + :keyword bytearray source_content_md5: + Specify the md5 that is used to verify the integrity of the source bytes. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. 
+ :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + """ + options = self._upload_blob_from_url_options( + source_url=self._encode_source_url(source_url), + **kwargs) + try: + return self._client.block_blob.put_blob_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def upload_blob( # pylint: disable=too-many-locals + self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] + length=None, # type: Optional[int] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Any + """Creates a new blob from a data source with automatic chunking. + + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tags. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. The exception to the above is with Append + blob types: if set to False and the data already exists, an error will not be raised + and the data will be appended to the existing blob. If overwrite=True is set, the existing + append blob will be deleted, and a new one created. Defaults to False. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
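To make upload_blob_from_url concrete, a short sketch assuming a publicly readable source blob and an existing `client` for the destination:

    # Server-side copy into a new block blob; the data never flows through
    # this client. Returns a property dict (etag, last_modified, ...).
    props = client.upload_blob_from_url(
        "https://otheraccount.blob.core.windows.net/mycontainer/myblob",
        overwrite=True,
        include_source_blob_properties=True,
    )
    print(props.get("etag"), props.get("last_modified"))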
+ :keyword lease: + Required if the blob has an active lease. If specified, upload_blob only succeeds if the + blob's lease is active and matches this ID. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. 
+ :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START upload_a_blob] + :end-before: [END upload_a_blob] + :language: python + :dedent: 12 + :caption: Upload a blob to the container. + """ + options = self._upload_blob_options( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + **kwargs) + if blob_type == BlobType.BlockBlob: + return upload_block_blob(**options) + if blob_type == BlobType.PageBlob: + return upload_page_blob(**options) + return upload_append_blob(**options) + + def _download_blob_options(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any] + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + if length is not None: + length = offset + length - 1 # Service actually uses an end-range inclusive index + + validate_content = kwargs.pop('validate_content', False) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'clients': self._client, + 'config': self._config, + 'start_range': offset, + 'end_range': length, + 'version_id': kwargs.pop('version_id', None), + 'validate_content': validate_content, + 'encryption_options': { + 'required': self.require_encryption, + 'key': self.key_encryption_key, + 'resolver': self.key_resolver_function}, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'cls': kwargs.pop('cls', None) or deserialize_blob_stream, + 'max_concurrency':kwargs.pop('max_concurrency', 1), + 'encoding': kwargs.pop('encoding', None), + 'timeout': kwargs.pop('timeout', None), + 'name': self.blob_name, + 'container': self.container_name} + options.update(kwargs) + return options + + @distributed_trace + def download_blob(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], **Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. + + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. 
This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.blob.StorageStreamDownloader + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START download_a_blob] + :end-before: [END download_a_blob] + :language: python + :dedent: 12 + :caption: Download a blob. 
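A round-trip sketch for the upload_blob/download_blob pair, with `client` as before:

    # Upload with automatic chunking; without overwrite=True an existing blob
    # makes this raise ResourceExistsError.
    client.upload_blob(b"payload", overwrite=True)

    # download_blob returns a StorageStreamDownloader: readall() buffers the
    # whole blob, readinto() writes into an open stream, chunks() iterates.
    data = client.download_blob(max_concurrency=2).readall()

    # Ranged read: offset must be given whenever length is set.
    first_kb = client.download_blob(offset=0, length=1024).readall()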
+ """ + options = self._download_blob_options( + offset=offset, + length=length, + **kwargs) + return StorageStreamDownloader(**options) + + def _quick_query_options(self, query_expression, + **kwargs): + # type: (str, **Any) -> Dict[str, Any] + delimiter = '\n' + input_format = kwargs.pop('blob_format', None) + if input_format: + try: + delimiter = input_format.lineterminator + except AttributeError: + try: + delimiter = input_format.delimiter + except AttributeError: + raise ValueError("The type of blob_format can only be DelimitedTextDialect or DelimitedJsonDialect") + output_format = kwargs.pop('output_format', None) + if output_format: + try: + delimiter = output_format.lineterminator + except AttributeError: + try: + delimiter = output_format.delimiter + except AttributeError: + pass + else: + output_format = input_format + query_request = QueryRequest( + expression=query_expression, + input_serialization=serialize_query_format(input_format), + output_serialization=serialize_query_format(output_format) + ) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo( + encryption_key=cpk.key_value, + encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm + ) + options = { + 'query_request': query_request, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'snapshot': self.snapshot, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_headers_and_deserialized, + } + options.update(kwargs) + return options, delimiter + + @distributed_trace + def query_blob(self, query_expression, **kwargs): + # type: (str, **Any) -> BlobQueryReader + """Enables users to select/project on blob or blob snapshot data by providing simple query expressions. + This operation returns a BlobQueryReader; users need to use readall() or readinto() to get query data. + + :param str query_expression: + Required. A query statement. + :keyword Callable[~azure.storage.blob.BlobQueryError] on_error: + A function to be called on any processing errors returned by the service. + :keyword blob_format: + Optional. Defines the serialization of the data currently stored in the blob. The default is to + treat the blob data as CSV data formatted in the default dialect. This can be overridden with + a custom DelimitedTextDialect, or alternatively a DelimitedJsonDialect. + :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect + :keyword output_format: + Optional. Defines the output serialization for the data stream. By default the data will be returned + as it is represented in the blob. By providing an output format, the blob data will be reformatted + according to that profile. This value can be a DelimitedTextDialect or a DelimitedJsonDialect. + :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect, ~azure.storage.blob.DelimitedJsonDialect + or list[~azure.storage.blob.ArrowDialect] + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A streaming object (BlobQueryReader) + :rtype: ~azure.storage.blob.BlobQueryReader + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_query.py + :start-after: [START query] + :end-before: [END query] + :language: python + :dedent: 4 + :caption: select/project on blob or blob snapshot data by providing simple query expressions.
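A minimal query sketch; the DelimitedTextDialect fields used here (delimiter, has_header) are assumed from the dialect's usual shape, since they are not shown in this hunk:

    from azure.multiapi.storagev2.blob.v2020_06_12 import DelimitedTextDialect

    # The service evaluates the expression server-side and streams back only
    # the matching data.
    input_format = DelimitedTextDialect(delimiter=",", has_header=True)
    reader = client.query_blob(
        "SELECT * from BlobStorage",
        blob_format=input_format,
    )
    filtered = reader.readall()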
+ """ + errors = kwargs.pop("on_error", None) + error_cls = kwargs.pop("error_cls", BlobQueryError) + encoding = kwargs.pop("encoding", None) + options, delimiter = self._quick_query_options(query_expression, **kwargs) + try: + headers, raw_response_body = self._client.blob.query(**options) + except HttpResponseError as error: + process_storage_error(error) + return BlobQueryReader( + name=self.blob_name, + container=self.container_name, + errors=errors, + record_delimiter=delimiter, + encoding=encoding, + headers=headers, + response=raw_response_body, + error_cls=error_cls) + + @staticmethod + def _generic_delete_blob_options(delete_snapshots=False, **kwargs): + # type: (bool, **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if delete_snapshots: + delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots) + options = { + 'timeout': kwargs.pop('timeout', None), + 'snapshot': kwargs.pop('snapshot', None), # this is added for delete_blobs + 'delete_snapshots': delete_snapshots or None, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions} + options.update(kwargs) + return options + + def _delete_blob_options(self, delete_snapshots=False, **kwargs): + # type: (bool, **Any) -> Dict[str, Any] + if self.snapshot and delete_snapshots: + raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.") + options = self._generic_delete_blob_options(delete_snapshots, **kwargs) + options['snapshot'] = self.snapshot + options['version_id'] = kwargs.pop('version_id', None) + options['blob_delete_type'] = kwargs.pop('blob_delete_type', None) + return options + + @distributed_trace + def delete_blob(self, delete_snapshots=None, **kwargs): + # type: (str, **Any) -> None + """Marks the specified blob for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob() + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob + and retains the blob for a specified number of days. + After the specified number of days, the blob's data is removed from the service during garbage collection. + A soft-deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying the `include=['deleted']` + option, and can be restored using the :func:`undelete` operation. + + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blob's snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword lease: + Required if the blob has an active lease. If specified, delete_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START delete_blob] + :end-before: [END delete_blob] + :language: python + :dedent: 12 + :caption: Delete a blob. + """ + options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) + try: + self._client.blob.delete(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def undelete_blob(self, **kwargs): + # type: (**Any) -> None + """Restores soft-deleted blobs or snapshots. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START undelete_blob] + :end-before: [END undelete_blob] + :language: python + :dedent: 8 + :caption: Undeleting a blob. + """ + try: + self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace() + def exists(self, **kwargs): + # type: (**Any) -> bool + """ + Returns True if a blob exists with the defined parameters, and returns + False otherwise. + + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to check if it exists. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: boolean + """ + try: + self._client.blob.get_properties( + snapshot=self.snapshot, + **kwargs) + return True + # Encrypted with CPK + except ResourceExistsError: + return True + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceNotFoundError: + return False + + @distributed_trace + def get_blob_properties(self, **kwargs): + # type: (**Any) -> BlobProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to get properties. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: BlobProperties + :rtype: ~azure.storage.blob.BlobProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] + :language: python + :dedent: 8 + :caption: Getting the properties for a blob. 
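+
+ In addition to the sample above, a minimal inline sketch of reading the
+ returned properties (the connection string, container, and blob names
+ below are placeholders, not values defined by this module):
+
+ .. code-block:: python
+
+ from azure.storage.blob import BlobClient
+
+ blob_client = BlobClient.from_connection_string(
+ "<connection_string>", container_name="mycontainer", blob_name="myblob")
+ properties = blob_client.get_blob_properties()
+ print(properties.etag, properties.last_modified, properties.size)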
+ """ + # TODO: extract this out as _get_blob_properties_options + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + try: + cls_method = kwargs.pop('cls', None) + if cls_method: + kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) + blob_props = self._client.blob.get_properties( + timeout=kwargs.pop('timeout', None), + version_id=kwargs.pop('version_id', None), + snapshot=self.snapshot, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=kwargs.pop('cls', None) or deserialize_blob_properties, + cpk_info=cpk_info, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + blob_props.name = self.blob_name + if isinstance(blob_props, BlobProperties): + blob_props.container = self.container_name + blob_props.snapshot = self.snapshot + return blob_props # type: ignore + + def _set_http_headers_options(self, content_settings=None, **kwargs): + # type: (Optional[ContentSettings], **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + blob_headers = None + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + options = { + 'timeout': kwargs.pop('timeout', None), + 'blob_http_headers': blob_headers, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def set_http_headers(self, content_settings=None, **kwargs): + # type: (Optional[ContentSettings], **Any) -> None + """Sets system properties on the blob. + + If one property is set for the content_settings, all properties will be overridden. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: Dict[str, Any] + """ + options = self._set_http_headers_options(content_settings=content_settings, **kwargs) + try: + return self._client.blob.set_http_headers(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _set_blob_metadata_options(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def set_blob_metadata(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] + """Sets user-defined metadata for the blob as one or more name-value pairs. + + :param metadata: + Dict containing name and value pairs. Each call to this operation + replaces all existing metadata attached to the blob. To remove all + metadata from the blob, call this operation with no metadata headers. + :type metadata: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified) + """ + options = self._set_blob_metadata_options(metadata=metadata, **kwargs) + try: + return self._client.blob.set_metadata(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _create_page_blob_options( # type: ignore + self, size, # type: int + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] + **kwargs + ): + # type: (...) 
-> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + blob_headers = None + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + + sequence_number = kwargs.pop('sequence_number', None) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + if premium_page_blob_tier: + try: + headers['x-ms-access-tier'] = premium_page_blob_tier.value # type: ignore + except AttributeError: + headers['x-ms-access-tier'] = premium_page_blob_tier # type: ignore + + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + options = { + 'content_length': 0, + 'blob_content_length': size, + 'blob_sequence_number': sequence_number, + 'blob_http_headers': blob_headers, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'blob_tags_string': blob_tags_string, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def create_page_blob( # type: ignore + self, size, # type: int + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Creates a new Page Blob of the specified size. + + :param int size: + This specifies the maximum size for the page blob, up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. 
versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword int sequence_number:
+ Only for Page blobs. The sequence number is a user-controlled value that you can use to
+ track requests. The value of the sequence number must be between 0
+ and 2^63 - 1. The default value is 0.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict[str, Any] + """ + options = self._create_page_blob_options( + size, + content_settings=content_settings, + metadata=metadata, + premium_page_blob_tier=premium_page_blob_tier, + **kwargs) + try: + return self._client.page_blob.create(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + blob_headers = None + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + options = { + 'content_length': 0, + 'blob_http_headers': blob_headers, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'blob_tags_string': blob_tags_string, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def create_append_blob(self, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] + """Creates a new Append Blob. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_append_blob_options( + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return self._client.append_blob.create(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _create_snapshot_options(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def create_snapshot(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] + """Creates a snapshot of the blob. + + A snapshot is a read-only version of a blob that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a blob as it appears at a moment in time. 
+ + A snapshot of a blob has the same name as the base blob from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START create_blob_snapshot] + :end-before: [END create_blob_snapshot] + :language: python + :dedent: 8 + :caption: Create a snapshot of the blob. 
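+
+ A minimal inline sketch of consuming the returned dictionary
+ (``blob_client`` is assumed to be an existing BlobClient; the names are
+ illustrative):
+
+ .. code-block:: python
+
+ snapshot_info = blob_client.create_snapshot()
+ # The returned dict carries the new snapshot's ID alongside the
+ # Etag and last-modified values.
+ snapshot_id = snapshot_info['snapshot']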
+ """ + options = self._create_snapshot_options(metadata=metadata, **kwargs) + try: + return self._client.blob.create_snapshot(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs): + # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + if 'source_lease' in kwargs: + source_lease = kwargs.pop('source_lease') + try: + headers['x-ms-source-lease-id'] = source_lease.id # type: str + except AttributeError: + headers['x-ms-source-lease-id'] = source_lease + + tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None) + + if kwargs.get('requires_sync'): + headers['x-ms-requires-sync'] = str(kwargs.pop('requires_sync')) + + timeout = kwargs.pop('timeout', None) + dest_mod_conditions = get_modify_conditions(kwargs) + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + options = { + 'copy_source': source_url, + 'seal_blob': kwargs.pop('seal_destination_blob', None), + 'timeout': timeout, + 'modified_access_conditions': dest_mod_conditions, + 'blob_tags_string': blob_tags_string, + 'headers': headers, + 'cls': return_response_headers, + } + if not incremental_copy: + source_mod_conditions = get_source_conditions(kwargs) + dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None)) + options['source_modified_access_conditions'] = source_mod_conditions + options['lease_access_conditions'] = dest_access_conditions + options['tier'] = tier.value if tier else None + options.update(kwargs) + return options + + @distributed_trace + def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): + # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]] + """Copies a blob asynchronously. + + This operation returns a copy operation + object that can be used to wait on the completion of the operation, + as well as check status or abort the copy operation. + The Blob service copies blobs on a best-effort basis. + + The source blob for a copy operation may be a block blob, an append blob, + or a page blob. If the destination blob already exists, it must be of the + same blob type as the source blob. Any existing destination blob will be + overwritten. The destination blob cannot be modified while a copy operation + is in progress. + + When copying from a page blob, the Blob service creates a destination page + blob of the source blob's length, initially containing all zeroes. Then + the source page ranges are enumerated, and non-empty ranges are copied. + + For a block blob or an append blob, the Blob service creates a committed + blob of zero length before returning from this operation. When copying + from a block blob, all committed blocks and their block IDs are copied. + Uncommitted blocks are not copied. At the end of the copy operation, the + destination blob will have the same committed block count as the source. + + When copying from an append blob, all committed blocks are copied. At the + end of the copy operation, the destination blob will have the same committed + block count as the source. + + For all blob types, you can call status() on the returned polling object + to check the status of the copy operation, or wait() to block until the + operation is complete. 
The final blob will be committed when the copy completes.
+
+ :param str source_url:
+ A URL of up to 2 KB in length that specifies a file or blob.
+ The value should be URL-encoded as it would appear in a request URI.
+ If the source is in another account, the source must either be public
+ or must be authenticated via a shared access signature. If the source
+ is public, no authentication is required.
+ Examples:
+ https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+ https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
+
+ https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+ :param metadata:
+ Name-value pairs associated with the blob as metadata. If no name-value
+ pairs are specified, the operation will copy the metadata from the
+ source blob or file to the destination blob. If one or more name-value
+ pairs are specified, the destination blob is created with the specified
+ metadata, and metadata is not copied from the source blob or file.
+ :type metadata: dict(str, str)
+ :param bool incremental_copy:
+ Copies the snapshot of the source page blob to a destination page blob.
+ The snapshot is copied such that only the differential changes between
+ the previously copied snapshot and the source are transferred to the destination.
+ The copied snapshots are complete copies of the original snapshot and
+ can be read or copied from as usual. Defaults to False.
+ :keyword tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only if the source
+ blob has been modified since the specified date/time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only if the source blob
+ has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only
+ if the destination blob has been modified since the specified date/time.
+ If the destination blob has not been modified, the Blob service returns + status code 412 (Precondition Failed). + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has not been modified since the specified + date/time. If the destination blob has been modified, the Blob service + returns status code 412 (Precondition Failed). + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword source_lease: + Specify this to perform the Copy Blob operation only if + the lease ID given matches the active lease ID of the source blob. + :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword bool seal_destination_blob: + Seal the destination append blob. This operation is only for append blob. + + .. versionadded:: 12.4.0 + + :keyword bool requires_sync: + Enforces that the service will not return a response until the copy is complete. + :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START copy_blob_from_url] + :end-before: [END copy_blob_from_url] + :language: python + :dedent: 12 + :caption: Copy a blob from a URL. 
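+
+ A minimal polling sketch (assumes an existing ``blob_client`` and a
+ readable ``source_url``; the wait loop is illustrative, not required by
+ the API):
+
+ .. code-block:: python
+
+ import time
+
+ blob_client.start_copy_from_url(source_url)
+ props = blob_client.get_blob_properties()
+ while props.copy.status == 'pending':
+ time.sleep(1)
+ props = blob_client.get_blob_properties()
+ # props.copy.status is now 'success', 'aborted', or 'failed'.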
+ """ + options = self._start_copy_from_url_options( + source_url=self._encode_source_url(source_url), + metadata=metadata, + incremental_copy=incremental_copy, + **kwargs) + try: + if incremental_copy: + return self._client.page_blob.copy_incremental(**options) + return self._client.blob.start_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + def _abort_copy_options(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + try: + copy_id = copy_id.copy.id + except AttributeError: + try: + copy_id = copy_id['copy_id'] + except TypeError: + pass + options = { + 'copy_id': copy_id, + 'lease_access_conditions': access_conditions, + 'timeout': kwargs.pop('timeout', None)} + options.update(kwargs) + return options + + @distributed_trace + def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination blob with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID string, or an + instance of BlobProperties. + :type copy_id: str or ~azure.storage.blob.BlobProperties + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START abort_copy_blob_from_url] + :end-before: [END abort_copy_blob_from_url] + :language: python + :dedent: 12 + :caption: Abort copying a blob from URL. + """ + options = self._abort_copy_options(copy_id, **kwargs) + try: + self._client.blob.abort_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): + # type: (int, Optional[str], **Any) -> BlobLeaseClient + """Requests a new lease. + + If the blob does not have an active lease, the Blob + Service creates a lease on the blob and returns a new lease. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A BlobLeaseClient object. + :rtype: ~azure.storage.blob.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START acquire_lease_on_blob] + :end-before: [END acquire_lease_on_blob] + :language: python + :dedent: 8 + :caption: Acquiring a lease on a blob. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + lease.acquire(lease_duration=lease_duration, **kwargs) + return lease + + @distributed_trace + def set_standard_blob_tier(self, standard_blob_tier, **kwargs): + # type: (Union[str, StandardBlobTier], Any) -> None + """This operation sets the tier on a block blob. + + A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param standard_blob_tier: + Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', + 'Archive'. The hot tier is optimized for storing data that is accessed + frequently. The cool storage tier is optimized for storing data that + is infrequently accessed and stored for at least a month. The archive + tier is optimized for storing data that is rarely accessed and stored + for at least six months with flexible latency requirements. + :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if standard_blob_tier is None: + raise ValueError("A StandardBlobTier must be specified") + if self.snapshot and kwargs.get('version_id'): + raise ValueError("Snapshot and version_id cannot be set at the same time") + try: + self._client.blob.set_tier( + tier=standard_blob_tier, + snapshot=self.snapshot, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + def _stage_block_options( + self, block_id, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) 
-> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + block_id = encode_base64(str(block_id)) + if isinstance(data, six.text_type): + data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + if length is None: + length = get_length(data) + if length is None: + length, data = read_length(data) + if isinstance(data, bytes): + data = data[:length] + + validate_content = kwargs.pop('validate_content', False) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'block_id': block_id, + 'content_length': length, + 'body': data, + 'transactional_content_md5': None, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + } + options.update(kwargs) + return options + + @distributed_trace + def stage_block( + self, block_id, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Creates a new block to be committed as part of a blob. + + :param str block_id: A string value that identifies the block. + The string should be less than or equal to 64 bytes in size. + For a given blob, the block_id must be the same size for each block. + :param data: The blob data. + :param int length: Size of the block. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str encoding: + Defaults to UTF-8. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :returns: Blob property dict. + :rtype: dict[str, Any] + """ + options = self._stage_block_options( + block_id, + data, + length=length, + **kwargs) + try: + return self._client.block_blob.stage_block(**options) + except HttpResponseError as error: + process_storage_error(error) + + def _stage_block_from_url_options( + self, block_id, # type: str + source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + source_content_md5=None, # type: Optional[Union[bytes, bytearray]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + if source_length is not None and source_offset is None: + raise ValueError("Source offset value must not be None if length is set.") + if source_length is not None: + source_length = source_offset + source_length - 1 + block_id = encode_base64(str(block_id)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + range_header = None + if source_offset is not None: + range_header, _ = validate_and_format_range_headers(source_offset, source_length) + + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'block_id': block_id, + 'content_length': 0, + 'source_url': source_url, + 'source_range': range_header, + 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + } + options.update(kwargs) + return options + + @distributed_trace + def stage_block_from_url( + self, block_id, # type: str + source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + source_content_md5=None, # type: Optional[Union[bytes, bytearray]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Creates a new block to be committed as part of a blob where + the contents are read from a URL. + + :param str block_id: A string value that identifies the block. + The string should be less than or equal to 64 bytes in size. + For a given blob, the block_id must be the same size for each block. + :param str source_url: The URL. + :param int source_offset: + Start of byte range to use for the block. + Must be set if source length is provided. + :param int source_length: The size of the block in bytes. + :param bytearray source_content_md5: + Specify the md5 calculated for the range of + bytes that must be read from the copy source. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. 
If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob property dict. + :rtype: dict[str, Any] + """ + options = self._stage_block_from_url_options( + block_id, + source_url=self._encode_source_url(source_url), + source_offset=source_offset, + source_length=source_length, + source_content_md5=source_content_md5, + **kwargs) + try: + return self._client.block_blob.stage_block_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + def _get_block_list_result(self, blocks): + # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]] + committed = [] # type: List + uncommitted = [] # type: List + if blocks.committed_blocks: + committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access + if blocks.uncommitted_blocks: + uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access + return committed, uncommitted + + @distributed_trace + def get_block_list(self, block_list_type="committed", **kwargs): + # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]] + """The Get Block List operation retrieves the list of blocks that have + been uploaded as part of a block blob. + + :param str block_list_type: + Specifies whether to return the list of committed + blocks, the list of uncommitted blocks, or both lists together. + Possible values include: 'committed', 'uncommitted', 'all' + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A tuple of two lists - committed and uncommitted blocks + :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + try: + blocks = self._client.block_blob.get_block_list( + list_type=block_list_type, + snapshot=self.snapshot, + timeout=kwargs.pop('timeout', None), + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return self._get_block_list_result(blocks) + + def _commit_block_list_options( # type: ignore + self, block_list, # type: List[BlobBlock] + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) 
-> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+ block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
+ for block in block_list:
+ try:
+ if block.state.value == 'committed':
+ block_lookup.committed.append(encode_base64(str(block.id)))
+ elif block.state.value == 'uncommitted':
+ block_lookup.uncommitted.append(encode_base64(str(block.id)))
+ else:
+ block_lookup.latest.append(encode_base64(str(block.id)))
+ except AttributeError:
+ block_lookup.latest.append(encode_base64(str(block)))
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata))
+ blob_headers = None
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if content_settings:
+ blob_headers = BlobHTTPHeaders(
+ blob_cache_control=content_settings.cache_control,
+ blob_content_type=content_settings.content_type,
+ blob_content_md5=content_settings.content_md5,
+ blob_content_encoding=content_settings.content_encoding,
+ blob_content_language=content_settings.content_language,
+ blob_content_disposition=content_settings.content_disposition
+ )
+
+ validate_content = kwargs.pop('validate_content', False)
+ cpk_scope_info = get_cpk_scope_info(kwargs)
+ cpk = kwargs.pop('cpk', None)
+ cpk_info = None
+ if cpk:
+ if self.scheme.lower() != 'https':
+ raise ValueError("Customer provided encryption key must be used over HTTPS.")
+ cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+ encryption_algorithm=cpk.algorithm)
+
+ tier = kwargs.pop('standard_blob_tier', None)
+ blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None))
+
+ options = {
+ 'blocks': block_lookup,
+ 'blob_http_headers': blob_headers,
+ 'lease_access_conditions': access_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'modified_access_conditions': mod_conditions,
+ 'cls': return_response_headers,
+ 'validate_content': validate_content,
+ 'cpk_scope_info': cpk_scope_info,
+ 'cpk_info': cpk_info,
+ 'tier': tier.value if tier else None,
+ 'blob_tags_string': blob_tags_string,
+ 'headers': headers
+ }
+ options.update(kwargs)
+ return options
+
+ @distributed_trace
+ def commit_block_list( # type: ignore
+ self, block_list, # type: List[BlobBlock]
+ content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """The Commit Block List operation writes a blob by specifying the list of
+ block IDs that make up the blob.
+
+ :param list block_list:
+ List of BlobBlock objects.
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict[str, str]
+ :keyword tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._commit_block_list_options( + block_list, + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return self._client.block_blob.commit_block_list(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): + # type: (Union[str, PremiumPageBlobTier], **Any) -> None + """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. 
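+
+        A minimal usage sketch (hypothetical names: ``conn_str`` is a placeholder
+        for a real storage connection string, and the container/blob names are
+        invented for illustration):
+
+        .. code-block:: python
+
+            from azure.multiapi.storagev2.blob.v2020_06_12 import (
+                BlobClient,
+                PremiumPageBlobTier,
+            )
+
+            conn_str = "<storage-account-connection-string>"
+            # Assumes an existing page blob on a premium storage account.
+            blob = BlobClient.from_connection_string(conn_str, "mycontainer", "mydisk")
+            blob.set_premium_page_blob_tier(PremiumPageBlobTier.P10)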
+ + :param premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if premium_page_blob_tier is None: + raise ValueError("A PremiumPageBlobTier must be specified") + try: + self._client.blob.set_tier( + tier=premium_page_blob_tier, + timeout=kwargs.pop('timeout', None), + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + def _set_blob_tags_options(self, tags=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + tags = serialize_blob_tags(tags) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + options = { + 'tags': tags, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def set_blob_tags(self, tags=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. + Each call to this operation replaces all existing tags attached to the blob. To remove all + tags from the blob, call this operation with no tags set. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. + + :param tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + :type tags: dict(str, str) + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to add tags to. + :keyword bool validate_content: + If true, calculates an MD5 hash of the tags content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + eg. 
``\"\\\"tagname\\\"='my tag'\"`` + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: Dict[str, Any] + """ + options = self._set_blob_tags_options(tags=tags, **kwargs) + try: + return self._client.blob.set_tags(**options) + except HttpResponseError as error: + process_storage_error(error) + + def _get_blob_tags_options(self, **kwargs): + # type: (**Any) -> Dict[str, str] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + options = { + 'version_id': kwargs.pop('version_id', None), + 'snapshot': self.snapshot, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_headers_and_deserialized} + return options + + @distributed_trace + def get_blob_tags(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. + + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to add tags to. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Key value pairs of blob tags. + :rtype: Dict[str, str] + """ + options = self._get_blob_tags_options(**kwargs) + try: + _, tags = self._client.blob.get_tags(**options) + return parse_tags(tags) # pylint: disable=protected-access + except HttpResponseError as error: + process_storage_error(error) + + def _get_page_ranges_options( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] + **kwargs + ): + # type: (...) 
-> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + if length is not None: + length = offset + length - 1 # Reformat to an inclusive range index + page_range, _ = validate_and_format_range_headers( + offset, length, start_range_required=False, end_range_required=False, align_to_page=True + ) + options = { + 'snapshot': self.snapshot, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'timeout': kwargs.pop('timeout', None), + 'range': page_range} + if previous_snapshot_diff: + try: + options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore + except AttributeError: + try: + options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore + except TypeError: + options['prevsnapshot'] = previous_snapshot_diff + options.update(kwargs) + return options + + @distributed_trace + def get_page_ranges( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] + **kwargs + ): + # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + """Returns the list of valid page ranges for a Page Blob or snapshot + of a page blob. + + :param int offset: + Start of byte range to use for getting valid page ranges. + If no length is given, all bytes after the offset will be searched. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for getting valid page ranges. + If length is given, offset must be provided. + This range will return valid page ranges from the offset start up to + the specified length. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param str previous_snapshot_diff: + The snapshot diff parameter that contains an opaque DateTime value that + specifies a previous blob snapshot to be compared + against a more recent snapshot or the current blob. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. 
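+
+            For example, to list ranges only if the blob has not changed since its
+            properties were read (an illustrative sketch; ``blob`` is assumed to be
+            an existing page-blob ``BlobClient``):
+
+            .. code-block:: python
+
+                from azure.core import MatchConditions
+
+                props = blob.get_blob_properties()
+                filled, cleared = blob.get_page_ranges(
+                    etag=props.etag, match_condition=MatchConditions.IfNotModified
+                )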
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on a blob with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled page ranges, the second element is the cleared page ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        options = self._get_page_ranges_options(
+            offset=offset,
+            length=length,
+            previous_snapshot_diff=previous_snapshot_diff,
+            **kwargs)
+        try:
+            if previous_snapshot_diff:
+                ranges = self._client.page_blob.get_page_ranges_diff(**options)
+            else:
+                ranges = self._client.page_blob.get_page_ranges(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
+
+    @distributed_trace
+    def get_page_range_diff_for_managed_disk(
+            self, previous_snapshot_url,  # type: str
+            offset=None,  # type: Optional[int]
+            length=None,  # type: Optional[int]
+            **kwargs
+        ):
+        # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+        """Returns the list of valid page ranges for a managed disk or snapshot.
+
+        .. note::
+            This operation is only available for managed disk accounts.
+
+        .. versionadded:: 12.2.0
+            This operation was introduced in API version '2019-07-07'.
+
+        :param str previous_snapshot_url:
+            Specifies the URL of a previous snapshot of the managed disk.
+            The response will only contain pages that were changed between the target blob and
+            its previous snapshot.
+        :param int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :param int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
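+
+            A sketch of a typical diff call (assumes ``blob`` is a ``BlobClient``
+            for a managed disk and ``snapshot_url`` is a previously captured
+            snapshot URL; both names are assumptions for illustration):
+
+            .. code-block:: python
+
+                changed, cleared = blob.get_page_range_diff_for_managed_disk(snapshot_url)
+                total_changed = sum(r['end'] - r['start'] + 1 for r in changed)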
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled page ranges, the second element is the cleared page ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        options = self._get_page_ranges_options(
+            offset=offset,
+            length=length,
+            prev_snapshot_url=previous_snapshot_url,
+            **kwargs)
+        try:
+            ranges = self._client.page_blob.get_page_ranges_diff(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
+
+    def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs):
+        # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any]
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        if sequence_number_action is None:
+            raise ValueError("A sequence number action must be specified")
+        options = {
+            'sequence_number_action': sequence_number_action,
+            'timeout': kwargs.pop('timeout', None),
+            'blob_sequence_number': sequence_number,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs):
+        # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]]
+        """Sets the blob sequence number.
+
+        :param str sequence_number_action:
+            This property indicates how the service should modify the blob's sequence
+            number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
+        :param str sequence_number:
+            This property sets the blob's sequence number. The sequence number is a
+            user-controlled property that you can use to track requests and manage
+            concurrency issues.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on a blob with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. 
versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._set_sequence_number_options( + sequence_number_action, sequence_number=sequence_number, **kwargs) + try: + return self._client.page_blob.update_sequence_number(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _resize_blob_options(self, size, **kwargs): + # type: (int, **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if size is None: + raise ValueError("A content length must be specified for a Page Blob.") + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'blob_content_length': size, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def resize_blob(self, size, **kwargs): + # type: (int, **Any) -> Dict[str, Union[str, datetime]] + """Resizes a page blob to the specified size. + + If the specified value is less than the current size of the blob, + then all pages above the specified value are cleared. + + :param int size: + Size used to resize blob. Maximum size for a page blob is up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. 
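+
+            For example (illustrative only; ``blob`` is assumed to be an existing
+            page-blob client on a premium account):
+
+            .. code-block:: python
+
+                from azure.multiapi.storagev2.blob.v2020_06_12 import PremiumPageBlobTier
+
+                # Grow to 1 GiB (a multiple of 512) and move to the P15 tier.
+                blob.resize_blob(1024 * 1024 * 1024, premium_page_blob_tier=PremiumPageBlobTier.P15)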
+ :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._resize_blob_options(size, **kwargs) + try: + return self._client.page_blob.resize(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _upload_page_options( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + if isinstance(page, six.text_type): + page = page.encode(kwargs.pop('encoding', 'UTF-8')) + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + if offset is None or offset % 512 != 0: + raise ValueError("offset must be an integer that aligns with 512 page size") + if length is None or length % 512 != 0: + raise ValueError("length must be an integer that aligns with 512 page size") + end_range = offset + length - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + seq_conditions = SequenceNumberAccessConditions( + if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), + if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), + if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) + ) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + validate_content = kwargs.pop('validate_content', False) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'body': page[:length], + 'content_length': length, + 'transactional_content_md5': None, + 'timeout': kwargs.pop('timeout', None), + 'range': content_range, + 'lease_access_conditions': access_conditions, + 'sequence_number_access_conditions': seq_conditions, + 'modified_access_conditions': mod_conditions, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def upload_page( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """The Upload Pages operation writes a range of pages to a page blob. + + :param bytes page: + Content of the page. + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. 
The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._upload_page_options( + page=page, + offset=offset, + length=length, + **kwargs) + try: + return self._client.page_blob.upload_pages(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _upload_pages_from_url_options( # type: ignore + self, source_url, # type: str + offset, # type: int + length, # type: int + source_offset, # type: int + **kwargs + ): + # type: (...) 
-> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+        # TODO: extract the code to a method format_range
+        if offset is None or offset % 512 != 0:
+            raise ValueError("offset must be an integer that aligns with 512 page size")
+        if length is None or length % 512 != 0:
+            raise ValueError("length must be an integer that aligns with 512 page size")
+        if source_offset is None or source_offset % 512 != 0:
+            raise ValueError("source_offset must be an integer that aligns with 512 page size")
+
+        # Format ranges; end indices are inclusive, so subtract 1 from the length.
+        end_range = offset + length - 1
+        destination_range = 'bytes={0}-{1}'.format(offset, end_range)
+        source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1)
+
+        seq_conditions = SequenceNumberAccessConditions(
+            if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
+            if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
+            if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
+        )
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        source_mod_conditions = get_source_conditions(kwargs)
+        cpk_scope_info = get_cpk_scope_info(kwargs)
+        source_content_md5 = kwargs.pop('source_content_md5', None)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+
+        options = {
+            'source_url': source_url,
+            'content_length': 0,
+            'source_range': source_range,
+            'range': destination_range,
+            'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None,
+            'timeout': kwargs.pop('timeout', None),
+            'lease_access_conditions': access_conditions,
+            'sequence_number_access_conditions': seq_conditions,
+            'modified_access_conditions': mod_conditions,
+            'source_modified_access_conditions': source_mod_conditions,
+            'cpk_scope_info': cpk_scope_info,
+            'cpk_info': cpk_info,
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def upload_pages_from_url(self, source_url,  # type: str
+                              offset,  # type: int
+                              length,  # type: int
+                              source_offset,  # type: int
+                              **kwargs
+                              ):
+        # type: (...) -> Dict[str, Any]
+        """
+        The Upload Pages operation writes a range of pages to a page blob where
+        the contents are read from a URL.
+
+        :param str source_url:
+            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+            shared access signature attached.
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+            The service will read the same number of bytes as the destination range (``length`` bytes).
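+
+            For example (a minimal sketch; ``blob`` and the SAS-readable
+            ``src_url`` are assumptions, not part of this API):
+
+            .. code-block:: python
+
+                # Copy the first 4 KiB of the source into the same region here.
+                blob.upload_pages_from_url(src_url, offset=0, length=4096, source_offset=0)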
+ :keyword bytes source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. 
+ :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + """ + options = self._upload_pages_from_url_options( + source_url=self._encode_source_url(source_url), + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return self._client.page_blob.upload_pages_from_url(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _clear_page_options(self, offset, length, **kwargs): + # type: (int, int, **Any) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + seq_conditions = SequenceNumberAccessConditions( + if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), + if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), + if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) + ) + mod_conditions = get_modify_conditions(kwargs) + if offset is None or offset % 512 != 0: + raise ValueError("offset must be an integer that aligns with 512 page size") + if length is None or length % 512 != 0: + raise ValueError("length must be an integer that aligns with 512 page size") + end_range = length + offset - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'content_length': 0, + 'timeout': kwargs.pop('timeout', None), + 'range': content_range, + 'lease_access_conditions': access_conditions, + 'sequence_number_access_conditions': seq_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def clear_page(self, offset, length, **kwargs): + # type: (int, int, **Any) -> Dict[str, Union[str, datetime]] + """Clears a range of pages. + + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. 
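+
+            For example (a sketch; ``blob`` is assumed to be an existing page-blob
+            client):
+
+            .. code-block:: python
+
+                # Clear the second 512-byte page, but only while the blob's
+                # sequence number is still at most 5.
+                blob.clear_page(offset=512, length=512, if_sequence_number_lte=5)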
+ :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._clear_page_options(offset, length, **kwargs) + try: + return self._client.page_blob.clear_pages(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _append_block_options( # type: ignore + self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) 
-> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + if isinstance(data, six.text_type): + data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore + if length is None: + length = get_length(data) + if length is None: + length, data = read_length(data) + if length == 0: + return {} + if isinstance(data, bytes): + data = data[:length] + + appendpos_condition = kwargs.pop('appendpos_condition', None) + maxsize_condition = kwargs.pop('maxsize_condition', None) + validate_content = kwargs.pop('validate_content', False) + append_conditions = None + if maxsize_condition or appendpos_condition is not None: + append_conditions = AppendPositionAccessConditions( + max_size=maxsize_condition, + append_position=appendpos_condition + ) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'body': data, + 'content_length': length, + 'timeout': kwargs.pop('timeout', None), + 'transactional_content_md5': None, + 'lease_access_conditions': access_conditions, + 'append_position_access_conditions': append_conditions, + 'modified_access_conditions': mod_conditions, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def append_block( # type: ignore + self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """Commits a new block of data to the end of the existing append blob. + + :param data: + Content of the block. This can be bytes, text, an iterable or a file-like object. + :type data: bytes or str or Iterable + :param int length: + Size of the block in bytes. + :keyword bool validate_content: + If true, calculates an MD5 hash of the block content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. 
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). + :rtype: dict(str, Any) + """ + options = self._append_block_options( + data, + length=length, + **kwargs + ) + try: + return self._client.append_blob.append_block(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _append_block_from_url_options( # type: ignore + self, copy_source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + **kwargs + ): + # type: (...) 
-> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + # If end range is provided, start range must be provided + if source_length is not None and source_offset is None: + raise ValueError("source_offset should also be specified if source_length is specified") + # Format based on whether length is present + source_range = None + if source_length is not None: + end_range = source_offset + source_length - 1 + source_range = 'bytes={0}-{1}'.format(source_offset, end_range) + elif source_offset is not None: + source_range = "bytes={0}-".format(source_offset) + + appendpos_condition = kwargs.pop('appendpos_condition', None) + maxsize_condition = kwargs.pop('maxsize_condition', None) + source_content_md5 = kwargs.pop('source_content_md5', None) + append_conditions = None + if maxsize_condition or appendpos_condition is not None: + append_conditions = AppendPositionAccessConditions( + max_size=maxsize_condition, + append_position=appendpos_condition + ) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + source_mod_conditions = get_source_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'source_url': copy_source_url, + 'content_length': 0, + 'source_range': source_range, + 'source_content_md5': source_content_md5, + 'transactional_content_md5': None, + 'lease_access_conditions': access_conditions, + 'append_position_access_conditions': append_conditions, + 'modified_access_conditions': mod_conditions, + 'source_modified_access_conditions': source_mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + 'timeout': kwargs.pop('timeout', None)} + options.update(kwargs) + return options + + @distributed_trace + def append_block_from_url(self, copy_source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """ + Creates a new block to be committed as part of a blob, where the contents are read from a source url. + + :param str copy_source_url: + The URL of the source data. It can point to any Azure Blob or File, that is either public or has a + shared access signature attached. + :param int source_offset: + This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source. + :param int source_length: + This indicates the end of the range of bytes that has to be taken from the copy source. + :keyword bytearray source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). 
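+
+            For example (illustrative; ``append_blob`` and the SAS-readable
+            ``src_url`` are assumptions):
+
+            .. code-block:: python
+
+                # Append 1 MiB from the source, refusing to let the blob grow
+                # beyond 100 MiB.
+                append_blob.append_block_from_url(
+                    src_url, source_offset=0, source_length=1024 * 1024,
+                    maxsize_condition=100 * 1024 * 1024,
+                )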
+ :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the + AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. 
If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + """ + options = self._append_block_from_url_options( + copy_source_url=self._encode_source_url(copy_source_url), + source_offset=source_offset, + source_length=source_length, + **kwargs + ) + try: + return self._client.append_blob.append_block_from_url(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _seal_append_blob_options(self, **kwargs): + # type: (...) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + appendpos_condition = kwargs.pop('appendpos_condition', None) + append_conditions = None + if appendpos_condition is not None: + append_conditions = AppendPositionAccessConditions( + append_position=appendpos_condition + ) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + options = { + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'append_position_access_conditions': append_conditions, + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def seal_append_blob(self, **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """The Seal operation seals the Append Blob to make it read-only. + + .. versionadded:: 12.4.0 + + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). 
+ :rtype: dict(str, Any) + """ + options = self._seal_append_blob_options(**kwargs) + try: + return self._client.append_blob.seal(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def _get_container_client(self): # pylint: disable=client-method-missing-kwargs + # type: (...) -> ContainerClient + """Get a client to interact with the blob's parent container. + + The container need not already exist. Defaults to current blob's credentials. + + :returns: A ContainerClient. + :rtype: ~azure.storage.blob.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START get_container_client_from_blob_client] + :end-before: [END get_container_client_from_blob_client] + :language: python + :dedent: 8 + :caption: Get container client from blob object. + """ + from ._container_client import ContainerClient + if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + else: + _pipeline = self._pipeline # pylint: disable = protected-access + return ContainerClient( + "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name, + credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_blob_service_client.py b/azure/multiapi/storagev2/blob/v2020_06_12/_blob_service_client.py new file mode 100644 index 0000000..6016a8a --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_blob_service_client.py @@ -0,0 +1,732 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, + TYPE_CHECKING +) + + +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse # type: ignore + +from azure.core.paging import ItemPaged +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.tracing.decorator import distributed_trace + +from ._shared.models import LocationMode +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.parser import _to_utc_datetime +from ._shared.response_handlers import return_response_headers, process_storage_error, \ + parse_to_internal_user_delegation_key +from ._generated import AzureBlobStorage +from ._generated.models import StorageServiceProperties, KeyInfo +from ._container_client import ContainerClient +from ._blob_client import BlobClient +from ._models import ContainerPropertiesPaged +from ._list_blobs_helper import FilteredBlobPaged +from ._serialize import get_api_version +from ._deserialize import service_stats_deserialize, service_properties_deserialize + +if TYPE_CHECKING: + from datetime import datetime + from ._shared.models import UserDelegationKey + from ._lease import BlobLeaseClient + from ._models import ( + ContainerProperties, + BlobProperties, + PublicAccess, + BlobAnalyticsLogging, + Metrics, + CorsRule, + RetentionPolicy, + StaticWebsite, + FilteredBlob + ) + + +class BlobServiceClient(StorageAccountHostsMixin): + """A client to interact with the Blob Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete containers within the account. + For operations relating to a specific container or blob, clients for those entities + can also be retrieved using the `get_client` functions. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URL to the blob storage account. Any other entities included + in the URL path (e.g. container or blob) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. 
+ :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_blob_service_client] + :end-before: [END create_blob_service_client] + :language: python + :dedent: 8 + :caption: Creating the BlobServiceClient with account url and credential. + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_blob_service_client_oauth] + :end-before: [END create_blob_service_client_oauth] + :language: python + :dedent: 8 + :caption: Creating the BlobServiceClient with Azure Identity credentials. + """ + + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + _, sas_token = parse_query(parsed_url.query) + self._query_str, credential = self._format_query_string(sas_token, credential) + super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) + self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) + default_api_version = self._client._config.version # pylint: disable=protected-access + self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + return "{}://{}/{}".format(self.scheme, hostname, self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): # type: (...) -> BlobServiceClient + """Create BlobServiceClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. + Credentials provided here will take precedence over those in the connection string. + :returns: A Blob service client. + :rtype: ~azure.storage.blob.BlobServiceClient + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string]
+                :end-before: [END auth_from_connection_string]
+                :language: python
+                :dedent: 8
+                :caption: Creating the BlobServiceClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(account_url, credential=credential, **kwargs)
+
+    @distributed_trace
+    def get_user_delegation_key(self, key_start_time,  # type: datetime
+                                key_expiry_time,  # type: datetime
+                                **kwargs  # type: Any
+                                ):
+        # type: (...) -> UserDelegationKey
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: The user delegation key.
+        :rtype: ~azure.storage.blob.UserDelegationKey
+        """
+        key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info,
+                                                                               timeout=timeout,
+                                                                               **kwargs)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        return parse_to_internal_user_delegation_key(user_delegation_key)  # type: ignore
+
+    @distributed_trace
+    def get_account_information(self, **kwargs):
+        # type: (Any) -> Dict[str, str]
+        """Gets information related to the storage account.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_account_info]
+                :end-before: [END get_blob_service_account_info]
+                :language: python
+                :dedent: 8
+                :caption: Getting account information for the blob service.
+        """
+        try:
+            return self._client.service.get_account_info(cls=return_response_headers, **kwargs)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_service_stats(self, **kwargs):
+        # type: (**Any) -> Dict[str, Any]
+        """Retrieves statistics related to replication for the Blob service.
+
+        It is only available when read-access geo-redundant replication is enabled for
+        the storage account.
+
+        With geo-redundant replication, Azure Storage maintains your data durably
+        in two locations. In both locations, Azure Storage constantly maintains
+        multiple healthy replicas of your data. The location where you read,
+        create, update, or delete data is the primary storage account location.
+        The primary location exists in the region you choose at the time you
+        create an account via the Azure portal, for
+        example, North Central US. The location to which your data is replicated
+        is the secondary location. The secondary location is automatically
+        determined based on the location of the primary; it is in a second data
+        center that resides in the same region as the primary location. 
Read-only + access is available from the secondary location, if read-access geo-redundant + replication is enabled for your storage account. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The blob service stats. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START get_blob_service_stats] + :end-before: [END get_blob_service_stats] + :language: python + :dedent: 8 + :caption: Getting service stats for the blob service. + """ + timeout = kwargs.pop('timeout', None) + try: + stats = self._client.service.get_statistics( # type: ignore + timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) + return service_stats_deserialize(stats) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_service_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the properties of a storage account's Blob service, including + Azure Storage Analytics. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An object containing blob service properties such as + analytics logging, hour/minute metrics, cors rules, etc. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START get_blob_service_properties] + :end-before: [END get_blob_service_properties] + :language: python + :dedent: 8 + :caption: Getting service properties for the blob service. + """ + timeout = kwargs.pop('timeout', None) + try: + service_props = self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_service_properties( + self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] + hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + target_version=None, # type: Optional[str] + delete_retention_policy=None, # type: Optional[RetentionPolicy] + static_website=None, # type: Optional[StaticWebsite] + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's Blob service, including + Azure Storage Analytics. + + If an element (e.g. analytics_logging) is left as None, the + existing settings on the service for that functionality are preserved. + + :param analytics_logging: + Groups the Azure Analytics Logging settings. + :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for blobs. + :type hour_metrics: ~azure.storage.blob.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for blobs. + :type minute_metrics: ~azure.storage.blob.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list[~azure.storage.blob.CorsRule] + :param str target_version: + Indicates the default version to use for requests if an incoming + request's version is not specified. + :param delete_retention_policy: + The delete retention policy specifies whether to retain deleted blobs. + It also specifies the number of days and versions of blob to keep. 
+        :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
+        :param static_website:
+            Specifies whether the static website feature is enabled,
+            and if yes, indicates the index document and 404 error document to use.
+        :type static_website: ~azure.storage.blob.StaticWebsite
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START set_blob_service_properties]
+                :end-before: [END set_blob_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Setting service properties for the blob service.
+        """
+        if all(parameter is None for parameter in [
+                analytics_logging, hour_metrics, minute_metrics, cors,
+                target_version, delete_retention_policy, static_website]):
+            raise ValueError("set_service_properties should be called with at least one parameter")
+
+        props = StorageServiceProperties(
+            logging=analytics_logging,
+            hour_metrics=hour_metrics,
+            minute_metrics=minute_metrics,
+            cors=cors,
+            default_service_version=target_version,
+            delete_retention_policy=delete_retention_policy,
+            static_website=static_website
+        )
+        timeout = kwargs.pop('timeout', None)
+        try:
+            self._client.service.set_properties(props, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_containers(
+            self, name_starts_with=None,  # type: Optional[str]
+            include_metadata=False,  # type: Optional[bool]
+            **kwargs
+    ):
+        # type: (...) -> ItemPaged[ContainerProperties]
+        """Returns a generator to list the containers under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all containers have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only containers whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that container metadata be returned in the response.
+            The default value is `False`.
+        :keyword bool include_deleted:
+            Specifies that deleted containers be returned in the response. This is for
+            accounts with container restore enabled. The default value is `False`.
+            .. versionadded:: 12.4.0
+        :keyword int results_per_page:
+            The maximum number of container names to retrieve per API
+            call. If the request does not specify, the server will return up to 5,000 items.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) of ContainerProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_list_containers]
+                :end-before: [END bsc_list_containers]
+                :language: python
+                :dedent: 12
+                :caption: Listing the containers in the blob service. 
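+
+        A minimal usage sketch (``service`` is assumed to be an existing
+        BlobServiceClient; the name prefix is illustrative):
+
+        .. code-block:: python
+
+            # List containers whose names start with "app", including their metadata.
+            for container in service.list_containers(name_starts_with="app", include_metadata=True):
+                print(container.name, container.metadata)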
+ """ + include = ['metadata'] if include_metadata else [] + include_deleted = kwargs.pop('include_deleted', None) + if include_deleted: + include.append("deleted") + + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.service.list_containers_segment, + prefix=name_starts_with, + include=include, + timeout=timeout, + **kwargs) + return ItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=ContainerPropertiesPaged + ) + + @distributed_trace + def find_blobs_by_tags(self, filter_expression, **kwargs): + # type: (str, **Any) -> ItemPaged[FilteredBlob] + """The Filter Blobs operation enables callers to list blobs across all + containers whose tags match a given search expression. Filter blobs + searches across all containers within a storage account but can be + scoped within the expression to a single container. + + :param str filter_expression: + The expression to find blobs whose tags matches the specified condition. + eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" + To specify a container, eg. "@container='containerName' and \"Name\"='C'" + :keyword int results_per_page: + The max result per page when paginating. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob] + """ + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.service.filter_blobs, + where=filter_expression, + timeout=timeout, + **kwargs) + return ItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=FilteredBlobPaged) + + @distributed_trace + def create_container( + self, name, # type: str + metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[Union[PublicAccess, str]] + **kwargs + ): + # type: (...) -> ContainerClient + """Creates a new container under the specified account. + + If the container with the same name already exists, a ResourceExistsError will + be raised. This method returns a client with which to interact with the newly + created container. + + :param str name: The name of the container to create. + :param metadata: + A dict with name-value pairs to associate with the + container as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + Possible values include: 'container', 'blob'. + :type public_access: str or ~azure.storage.blob.PublicAccess + :keyword container_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + + .. versionadded:: 12.2.0 + + :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.blob.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START bsc_create_container] + :end-before: [END bsc_create_container] + :language: python + :dedent: 12 + :caption: Creating a container in the blob service. 
+ """ + container = self.get_container_client(name) + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + container.create_container( + metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) + return container + + @distributed_trace + def delete_container( + self, container, # type: Union[ContainerProperties, str] + lease=None, # type: Optional[Union[BlobLeaseClient, str]] + **kwargs + ): + # type: (...) -> None + """Marks the specified container for deletion. + + The container and any blobs contained within it are later deleted during garbage collection. + If the container is not found, a ResourceNotFoundError will be raised. + + :param container: + The container to delete. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. + Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START bsc_delete_container] + :end-before: [END bsc_delete_container] + :language: python + :dedent: 12 + :caption: Deleting a container in the blob service. + """ + container = self.get_container_client(container) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + container.delete_container( # type: ignore + lease=lease, + timeout=timeout, + **kwargs) + + @distributed_trace + def _rename_container(self, name, new_name, **kwargs): + # type: (str, str, **Any) -> ContainerClient + """Renames a container. + + Operation is successful only if the source container exists. + + :param str name: + The name of the container to rename. + :param str new_name: + The new container name the user wants to rename to. + :keyword lease: + Specify this to perform only if the lease ID given + matches the active lease ID of the source container. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :rtype: ~azure.storage.blob.ContainerClient + """ + renamed_container = self.get_container_client(new_name) + lease = kwargs.pop('lease', None) + try: + kwargs['source_lease_id'] = lease.id # type: str + except AttributeError: + kwargs['source_lease_id'] = lease + try: + renamed_container._client.container.rename(name, **kwargs) # pylint: disable = protected-access + return renamed_container + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): + # type: (str, str, **Any) -> ContainerClient + """Restores soft-deleted container. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. + + :param str deleted_container_name: + Specifies the name of the deleted container to restore. + :param str deleted_container_version: + Specifies the version of the deleted container to restore. + :keyword str new_name: + The new name for the deleted container to be restored to. + If not specified deleted_container_name will be used as the restored container name. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.blob.ContainerClient + """ + new_name = kwargs.pop('new_name', None) + container = self.get_container_client(new_name or deleted_container_name) + try: + container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access + deleted_container_version=deleted_container_version, + timeout=kwargs.pop('timeout', None), **kwargs) + return container + except HttpResponseError as error: + process_storage_error(error) + + def get_container_client(self, container): + # type: (Union[ContainerProperties, str]) -> ContainerClient + """Get a client to interact with the specified container. + + The container need not already exist. + + :param container: + The container. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :returns: A ContainerClient. + :rtype: ~azure.storage.blob.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START bsc_get_container_client] + :end-before: [END bsc_get_container_client] + :language: python + :dedent: 8 + :caption: Getting the container client to interact with a specific container. + """ + try: + container_name = container.name + except AttributeError: + container_name = container + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ContainerClient( + self.url, container_name=container_name, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + + def get_blob_client( + self, container, # type: Union[ContainerProperties, str] + blob, # type: Union[BlobProperties, str] + snapshot=None # type: Optional[Union[Dict[str, Any], str]] + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. 
+ + The blob need not already exist. + + :param container: + The container that the blob is in. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param blob: + The blob with which to interact. This can either be the name of the blob, + or an instance of BlobProperties. + :type blob: str or ~azure.storage.blob.BlobProperties + :param snapshot: + The optional blob snapshot on which to operate. This can either be the ID of the snapshot, + or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`. + :type snapshot: str or dict(str, Any) + :returns: A BlobClient. + :rtype: ~azure.storage.blob.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START bsc_get_blob_client] + :end-before: [END bsc_get_blob_client] + :language: python + :dedent: 12 + :caption: Getting the blob client to interact with a specific blob. + """ + try: + container_name = container.name + except AttributeError: + container_name = container + try: + blob_name = blob.name + except AttributeError: + blob_name = blob + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( # type: ignore + self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_container_client.py b/azure/multiapi/storagev2/blob/v2020_06_12/_container_client.py new file mode 100644 index 0000000..d2caf7a --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_container_client.py @@ -0,0 +1,1551 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator, + TYPE_CHECKING +) + + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six + +from azure.core import MatchConditions +from azure.core.exceptions import HttpResponseError, ResourceNotFoundError +from azure.core.paging import ItemPaged +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import HttpRequest + +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, serialize_iso +from ._shared.response_handlers import ( + process_storage_error, + return_response_headers, + return_headers_and_deserialized) +from ._generated import AzureBlobStorage +from ._generated.models import SignedIdentifier +from ._deserialize import deserialize_container_properties +from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions +from ._models import ( # pylint: disable=unused-import + ContainerProperties, + BlobProperties, + BlobType) +from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged +from ._lease import BlobLeaseClient +from ._blob_client import BlobClient + +if TYPE_CHECKING: + from azure.core.pipeline.transport import HttpTransport, HttpResponse # pylint: disable=ungrouped-imports + from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports + from datetime import datetime + from ._models import ( # pylint: disable=unused-import + PublicAccess, + AccessPolicy, + ContentSettings, + StandardBlobTier, + PremiumPageBlobTier) + + +def _get_blob_name(blob): + """Return the blob name. + + :param blob: A blob string or BlobProperties + :rtype: str + """ + try: + return blob.get('name') + except AttributeError: + return blob + + +class ContainerClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods + """A client to interact with a specific container, although that container + may not yet exist. + + For operations relating to a specific blob within this container, a blob client can be + retrieved using the :func:`~get_blob_client` function. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the container, + use the :func:`from_container_url` classmethod. + :param container_name: + The name of the container for the blob. + :type container_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. 
+ Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START create_container_client_from_service] + :end-before: [END create_container_client_from_service] + :language: python + :dedent: 8 + :caption: Get a ContainerClient from an existing BlobServiceClient. + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START create_container_client_sasurl] + :end-before: [END create_container_client_sasurl] + :language: python + :dedent: 8 + :caption: Creating the container client directly. + """ + def __init__( + self, account_url, # type: str + container_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Container URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not container_name: + raise ValueError("Please specify a container name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + _, sas_token = parse_query(parsed_url.query) + self.container_name = container_name + # This parameter is used for the hierarchy traversal. Give precedence to credential. 
+ self._raw_credential = credential if credential else sas_token + self._query_str, credential = self._format_query_string(sas_token, credential) + super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) + self._client = AzureBlobStorage(self.url, pipeline=self._pipeline) + default_api_version = self._client._config.version # pylint: disable=protected-access + self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access + + def _format_url(self, hostname): + container_name = self.container_name + if isinstance(container_name, six.text_type): + container_name = container_name.encode('UTF-8') + return "{}://{}/{}{}".format( + self.scheme, + hostname, + quote(container_name), + self._query_str) + + @classmethod + def from_container_url(cls, container_url, credential=None, **kwargs): + # type: (str, Optional[Any], Any) -> ContainerClient + """Create ContainerClient from a container url. + + :param str container_url: + The full endpoint URL to the Container, including SAS token if used. This could be + either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. + :type container_url: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :returns: A container client. + :rtype: ~azure.storage.blob.ContainerClient + """ + try: + if not container_url.lower().startswith('http'): + container_url = "https://" + container_url + except AttributeError: + raise ValueError("Container URL must be a string.") + parsed_url = urlparse(container_url.rstrip('/')) + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(container_url)) + + container_path = parsed_url.path.lstrip('/').split('/') + account_path = "" + if len(container_path) > 1: + account_path = "/" + "/".join(container_path[:-1]) + account_url = "{}://{}{}?{}".format( + parsed_url.scheme, + parsed_url.netloc.rstrip('/'), + account_path, + parsed_url.query) + container_name = unquote(container_path[-1]) + if not container_name: + raise ValueError("Invalid URL. Please provide a URL with a valid container name") + return cls(account_url, container_name=container_name, credential=credential, **kwargs) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + container_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): # type: (...) -> ContainerClient + """Create ContainerClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param container_name: + The container name for the blob. + :type container_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. 
The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. + Credentials provided here will take precedence over those in the connection string. + :returns: A container client. + :rtype: ~azure.storage.blob.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START auth_from_connection_string_container] + :end-before: [END auth_from_connection_string_container] + :language: python + :dedent: 8 + :caption: Creating the ContainerClient from a connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, container_name=container_name, credential=credential, **kwargs) + + @distributed_trace + def create_container(self, metadata=None, public_access=None, **kwargs): + # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None + """ + Creates a new container under the specified account. If the container + with the same name already exists, the operation fails. + + :param metadata: + A dict with name_value pairs to associate with the + container as metadata. Example:{'Category':'test'} + :type metadata: dict[str, str] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword container_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + + .. versionadded:: 12.2.0 + + :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START create_container] + :end-before: [END create_container] + :language: python + :dedent: 12 + :caption: Creating a container to store blobs. + """ + headers = kwargs.pop('headers', {}) + timeout = kwargs.pop('timeout', None) + headers.update(add_metadata_headers(metadata)) # type: ignore + container_cpk_scope_info = get_container_cpk_scope_info(kwargs) + try: + return self._client.container.create( # type: ignore + timeout=timeout, + access=public_access, + container_cpk_scope_info=container_cpk_scope_info, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def _rename_container(self, new_name, **kwargs): + # type: (str, **Any) -> ContainerClient + """Renames a container. + + Operation is successful only if the source container exists. + + :param str new_name: + The new container name the user wants to rename to. + :keyword lease: + Specify this to perform only if the lease ID given + matches the active lease ID of the source container. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :rtype: ~azure.storage.blob.ContainerClient + """ + lease = kwargs.pop('lease', None) + try: + kwargs['source_lease_id'] = lease.id # type: str + except AttributeError: + kwargs['source_lease_id'] = lease + try: + renamed_container = ContainerClient( + "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access + return renamed_container + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def delete_container( + self, **kwargs): + # type: (Any) -> None + """ + Marks the specified container for deletion. The container and any blobs + contained within it are later deleted during garbage collection. + + :keyword lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. + Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START delete_container] + :end-before: [END delete_container] + :language: python + :dedent: 12 + :caption: Delete a container. + """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + self._client.container.delete( + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def acquire_lease( + self, lease_duration=-1, # type: int + lease_id=None, # type: Optional[str] + **kwargs): + # type: (...) -> BlobLeaseClient + """ + Requests a new lease. If the container does not have an active lease, + the Blob service creates a lease on the container and returns a new + lease ID. 
+ + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A BlobLeaseClient object, that can be run in a context manager. + :rtype: ~azure.storage.blob.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START acquire_lease_on_container] + :end-before: [END acquire_lease_on_container] + :language: python + :dedent: 8 + :caption: Acquiring a lease on the container. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) + return lease + + @distributed_trace + def get_account_information(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + """ + try: + return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_container_properties(self, **kwargs): + # type: (Any) -> ContainerProperties + """Returns all user-defined metadata and system properties for the specified + container. The data returned does not include the container's list of blobs. + + :keyword lease: + If specified, get_container_properties only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Properties for the specified container within a container object. + :rtype: ~azure.storage.blob.ContainerProperties + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START get_container_properties]
+                :end-before: [END get_container_properties]
+                :language: python
+                :dedent: 12
+                :caption: Getting properties on the container.
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response = self._client.container.get_properties(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                cls=deserialize_container_properties,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        response.name = self.container_name
+        return response  # type: ignore
+
+    @distributed_trace
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if the container exists and returns False otherwise.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: True if the container exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            self._client.container.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace
+    def set_container_metadata(  # type: ignore
+            self, metadata=None,  # type: Optional[Dict[str, str]]
+            **kwargs
+    ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        container. Each call to this operation replaces all existing metadata
+        attached to the container. To remove all metadata from the container,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the container as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_container_metadata only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Container-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START set_container_metadata]
+                :end-before: [END set_container_metadata]
+                :language: python
+                :dedent: 12
+                :caption: Setting metadata on the container. 
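+
+        A minimal usage sketch (``container_client`` is assumed to be an
+        existing ContainerClient; the metadata values are illustrative):
+
+        .. code-block:: python
+
+            # Replace any existing metadata on the container with these pairs.
+            container_client.set_container_metadata(metadata={"category": "test"})
+
+            # Calling with no metadata dict clears all metadata on the container.
+            container_client.set_container_metadata()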
+ """ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + return self._client.container.set_metadata( # type: ignore + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs + # type: (...) -> BlobServiceClient + """Get a client to interact with the container's parent service account. + + Defaults to current container's credentials. + + :returns: A BlobServiceClient. + :rtype: ~azure.storage.blob.BlobServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START get_blob_service_client_from_container_client] + :end-before: [END get_blob_service_client_from_container_client] + :language: python + :dedent: 8 + :caption: Get blob service client from container object. + """ + from ._blob_service_client import BlobServiceClient + if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + else: + _pipeline = self._pipeline # pylint: disable = protected-access + return BlobServiceClient( + "{}://{}".format(self.scheme, self.primary_hostname), + credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, + _pipeline=_pipeline) + + @distributed_trace + def get_container_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the specified container. + The permissions indicate whether container data may be accessed publicly. + + :keyword lease: + If specified, get_container_access_policy only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START get_container_access_policy] + :end-before: [END get_container_access_policy] + :language: python + :dedent: 12 + :caption: Getting the access policy on the container. 
+ """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = self._client.container.get_access_policy( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=return_headers_and_deserialized, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return { + 'public_access': response.get('blob_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace + def set_container_access_policy( + self, signed_identifiers, # type: Dict[str, AccessPolicy] + public_access=None, # type: Optional[Union[str, PublicAccess]] + **kwargs + ): # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the permissions for the specified container or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether blobs in a container may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the container. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified date/time. + :keyword ~datetime.datetime if_unmodified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Container-updated property dict (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START set_container_access_policy] + :end-before: [END set_container_access_policy] + :language: python + :dedent: 12 + :caption: Setting access policy on the container. + """ + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. 
The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore + signed_identifiers = identifiers # type: ignore + lease = kwargs.pop('lease', None) + mod_conditions = get_modify_conditions(kwargs) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + return self._client.container.set_access_policy( + container_acl=signed_identifiers or None, + timeout=timeout, + access=public_access, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_blobs(self, name_starts_with=None, include=None, **kwargs): + # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> ItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] or str include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START list_blobs_in_container] + :end-before: [END list_blobs_in_container] + :language: python + :dedent: 8 + :caption: List the blobs in the container. + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_flat_segment, + include=include, + timeout=timeout, + **kwargs) + return ItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=BlobPropertiesPaged) + + @distributed_trace + def walk_blobs( + self, name_starts_with=None, # type: Optional[str] + include=None, # type: Optional[Any] + delimiter="/", # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> ItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. This operation will list blobs in accordance with a hierarchy, + as delimited by the specified delimiter character. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
+ :param str delimiter: + When the request includes this parameter, the operation returns a BlobPrefix + element in the response body that acts as a placeholder for all blobs whose + names begin with the same substring up to the appearance of the delimiter + character. The delimiter may be a single character or a string. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_hierarchy_segment, + delimiter=delimiter, + include=include, + timeout=timeout, + **kwargs) + return BlobPrefix( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + delimiter=delimiter) + + @distributed_trace + def upload_blob( + self, name, # type: Union[str, BlobProperties] + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] + length=None, # type: Optional[int] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> BlobClient + """Creates a new blob from a data source with automatic chunking. + + :param name: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type name: str or ~azure.storage.blob.BlobProperties + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. The exception to the above is with Append + blob types: if set to False and the data already exists, an error will not be raised + and the data will be appended to the existing blob. If set overwrite=True, then the existing + append blob will be deleted, and a new one created. Defaults to False. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. 
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :returns: A BlobClient to interact with the newly uploaded blob. + :rtype: ~azure.storage.blob.BlobClient + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START upload_blob_to_container] + :end-before: [END upload_blob_to_container] + :language: python + :dedent: 8 + :caption: Upload blob to the container. + """ + blob = self.get_blob_client(name) + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + blob.upload_blob( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + timeout=timeout, + encoding=encoding, + **kwargs + ) + return blob + + @distributed_trace + def delete_blob( + self, blob, # type: Union[str, BlobProperties] + delete_snapshots=None, # type: Optional[str] + **kwargs + ): + # type: (...) -> None + """Marks the specified blob or snapshot for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot + and retains the blob or snapshot for the specified number of days. + After the specified number of days, the blob's data is removed from the service during garbage collection. + A soft-deleted blob or snapshot is accessible through :func:`list_blobs()` by specifying the `include=["deleted"]` + option, and can be restored using :func:`~BlobClient.undelete()`. + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blob's snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + ..
versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + blob_client = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + blob_client.delete_blob( # type: ignore + delete_snapshots=delete_snapshots, + timeout=timeout, + **kwargs) + + @distributed_trace + def download_blob(self, blob, offset=None, length=None, **kwargs): + # type: (Union[str, BlobProperties], Optional[int], Optional[int], **Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. 
versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.blob.StorageStreamDownloader + """ + blob_client = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + return blob_client.download_blob(offset=offset, length=length, **kwargs) + + def _generate_delete_blobs_subrequest_options( + self, snapshot=None, + delete_snapshots=None, + lease_access_conditions=None, + modified_access_conditions=None, + **kwargs + ): + """This code is a copy from _generated. + + Once Autorest is able to provide request preparation this code should be removed. + """ + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct parameters + timeout = kwargs.pop('timeout', None) + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access + if timeout is not None: + query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access + + # Construct headers + header_parameters = {} + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access + "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access + "lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access + "if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access + "if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access + "if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = 
self._client._serialize.header( # pylint: disable=protected-access + "if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access + + return query_parameters, header_parameters + + def _generate_delete_blobs_options(self, + *blobs, # type: List[Union[str, BlobProperties, dict]] + **kwargs + ): + timeout = kwargs.pop('timeout', None) + raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) + delete_snapshots = kwargs.pop('delete_snapshots', None) + if_modified_since = kwargs.pop('if_modified_since', None) + if_unmodified_since = kwargs.pop('if_unmodified_since', None) + if_tags_match_condition = kwargs.pop('if_tags_match_condition', None) + kwargs.update({'raise_on_any_failure': raise_on_any_failure, + 'sas': self._query_str.replace('?', '&'), + 'timeout': '&timeout=' + str(timeout) if timeout else "", + 'path': self.container_name, + 'restype': 'restype=container&' + }) + + reqs = [] + for blob in blobs: + blob_name = _get_blob_name(blob) + container_name = self.container_name + + try: + options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access + snapshot=blob.get('snapshot'), + delete_snapshots=delete_snapshots or blob.get('delete_snapshots'), + lease=blob.get('lease_id'), + if_modified_since=if_modified_since or blob.get('if_modified_since'), + if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'), + etag=blob.get('etag'), + if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'), + match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag') + else None, + timeout=blob.get('timeout'), + ) + except AttributeError: + options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access + delete_snapshots=delete_snapshots, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_tags_match_condition=if_tags_match_condition + ) + + query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options) + + req = HttpRequest( + "DELETE", + "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), + headers=header_parameters + ) + req.format_parameters(query_parameters) + reqs.append(req) + + return reqs, kwargs + + @distributed_trace + def delete_blobs(self, *blobs, **kwargs): + # type: (...) -> Iterator[HttpResponse] + """Marks the specified blobs or snapshots for deletion. + + The blobs are later deleted during garbage collection. + Note that in order to delete blobs, you must delete all of their + snapshots. You can delete both at the same time with the delete_blobs operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots + and retains the blobs or snapshots for the specified number of days. + After the specified number of days, the blobs' data is removed from the service during garbage collection. + Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` by specifying the `include=["deleted"]` + option. Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()`. + + :param blobs: + The blobs to delete. This can be a single blob, or multiple values can + be supplied, where each value is either the name of the blob (str) or BlobProperties. + + .. note:: + When the blob type is dict, here's a list of keys, value rules.
+ + blob name: + key: 'name', value type: str + snapshot you want to delete: + key: 'snapshot', value type: str + whether to delete snapshots when deleting the blob: + key: 'delete_snapshots', value: 'include' or 'only' + whether the blob has been modified: + key: 'if_modified_since', 'if_unmodified_since', value type: datetime + etag: + key: 'etag', value type: str + whether to match the etag: + key: 'match_condition', value type: MatchConditions + tags match condition: + key: 'if_tags_match_condition', value type: str + lease: + key: 'lease_id', value type: Union[str, LeaseClient] + timeout for subrequest: + key: 'timeout', value type: int + + :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] + :keyword str delete_snapshots: + Required if a blob has associated snapshots. Values include: + - "only": Deletes only the blob's snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword bool raise_on_any_failure: + This is a boolean param which defaults to True. When this is set, an exception + is raised even if there is a single operation failure. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: An iterator of responses, one for each blob in order + :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START delete_multiple_blobs] + :end-before: [END delete_multiple_blobs] + :language: python + :dedent: 8 + :caption: Deleting multiple blobs. + """ + if len(blobs) == 0: + return iter(list()) + + reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) + + return self._batch_send(*reqs, **options) + + def _generate_set_tiers_subrequest_options( + self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs + ): + """This code is a copy from _generated. + + Once Autorest is able to provide request preparation this code should be removed.
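An illustrative, hedged sketch of the batch delete described above; the ``container`` client and blob names are placeholders::

    from datetime import datetime, timezone

    cutoff = datetime(2021, 1, 1, tzinfo=timezone.utc)
    responses = container.delete_blobs(
        'obsolete.txt',                                        # plain blob name
        {'name': 'report.csv', 'delete_snapshots': 'include'},
        {'name': 'draft.md', 'if_unmodified_since': cutoff},
        raise_on_any_failure=False,
    )
    for response in responses:
        print(response.status_code)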
+ """ + if not tier: + raise ValueError("A blob tier must be specified") + if snapshot and version_id: + raise ValueError("Snapshot and version_id cannot be set at the same time") + if_tags = kwargs.pop('if_tags', None) + + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "tier" + timeout = kwargs.pop('timeout', None) + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access + if version_id is not None: + query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access + if timeout is not None: + query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access + query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call + + # Construct headers + header_parameters = {} + header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access + "rehydrate_priority", rehydrate_priority, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access + + return query_parameters, header_parameters + + def _generate_set_tiers_options(self, + blob_tier, # type: Optional[Union[str, StandardBlobTier, PremiumPageBlobTier]] + *blobs, # type: List[Union[str, BlobProperties, dict]] + **kwargs + ): + timeout = kwargs.pop('timeout', None) + raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) + rehydrate_priority = kwargs.pop('rehydrate_priority', None) + if_tags = kwargs.pop('if_tags_match_condition', None) + kwargs.update({'raise_on_any_failure': raise_on_any_failure, + 'sas': self._query_str.replace('?', '&'), + 'timeout': '&timeout=' + str(timeout) if timeout else "", + 'path': self.container_name, + 'restype': 'restype=container&' + }) + + reqs = [] + for blob in blobs: + blob_name = _get_blob_name(blob) + container_name = self.container_name + + try: + tier = blob_tier or blob.get('blob_tier') + query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( + tier=tier, + snapshot=blob.get('snapshot'), + version_id=blob.get('version_id'), + rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'), + lease_access_conditions=blob.get('lease_id'), + if_tags=if_tags or blob.get('if_tags_match_condition'), + timeout=timeout or blob.get('timeout') + ) + except AttributeError: + query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( + blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags) + + req = HttpRequest( + "PUT", + "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), + headers=header_parameters + ) + req.format_parameters(query_parameters) + reqs.append(req) + + return reqs, kwargs + + @distributed_trace + def set_standard_blob_tier_blobs( + self, + 
standard_blob_tier, # type: Optional[Union[str, StandardBlobTier]] + *blobs, # type: List[Union[str, BlobProperties, dict]] + **kwargs + ): + # type: (...) -> Iterator[HttpResponse] + """This operation sets the tier on block blobs. + + A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param standard_blob_tier: + Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', + 'Archive'. The hot tier is optimized for storing data that is accessed + frequently. The cool storage tier is optimized for storing data that + is infrequently accessed and stored for at least a month. The archive + tier is optimized for storing data that is rarely accessed and stored + for at least six months with flexible latency requirements. + + .. note:: + If you want to set a different tier on different blobs, please set this positional parameter to None. + Then the blob tier on every BlobProperties will be taken. + + :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier + :param blobs: + The blobs with which to interact. This can be a single blob, or multiple values can + be supplied, where each value is either the name of the blob (str) or BlobProperties. + + .. note:: + When the blob type is dict, here's a list of keys, value rules. + + blob name: + key: 'name', value type: str + standard blob tier: + key: 'blob_tier', value type: StandardBlobTier + rehydrate priority: + key: 'rehydrate_priority', value type: RehydratePriority + lease: + key: 'lease_id', value type: Union[str, LeaseClient] + snapshot: + key: 'snapshot', value type: str + version id: + key: 'version_id', value type: str + tags match condition: + key: 'if_tags_match_condition', value type: str + timeout for subrequest: + key: 'timeout', value type: int + + :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword bool raise_on_any_failure: + This is a boolean param which defaults to True. When this is set, an exception + is raised even if there is a single operation failure. + :return: An iterator of responses, one for each blob in order + :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse] + """ + reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs) + + return self._batch_send(*reqs, **options) + + @distributed_trace + def set_premium_page_blob_tier_blobs( + self, + premium_page_blob_tier, # type: Optional[Union[str, PremiumPageBlobTier]] + *blobs, # type: List[Union[str, BlobProperties, dict]] + **kwargs + ): + # type: (...) -> Iterator[HttpResponse] + """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts. + + :param premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + + .. note:: + If you want to set a different tier on different blobs, please set this positional parameter to None. + Then the blob tier on every BlobProperties will be taken.
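A hedged sketch of the two calling patterns the notes above describe; the ``container`` client and blob names are placeholders::

    from azure.multiapi.storagev2.blob.v2020_06_12 import StandardBlobTier

    # Same tier for every blob:
    container.set_standard_blob_tier_blobs(StandardBlobTier.Cool, 'a.txt', 'b.txt')

    # Per-blob tiers: pass None and carry the tier in each dict.
    container.set_standard_blob_tier_blobs(
        None,
        {'name': 'hot.bin', 'blob_tier': StandardBlobTier.Hot},
        {'name': 'cold.bin', 'blob_tier': StandardBlobTier.Archive},
    )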
+ + :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier + :param blobs: + The blobs with which to interact. This can be a single blob, or multiple values can + be supplied, where each value is either the name of the blob (str) or BlobProperties. + + .. note:: + When the blob type is dict, here's a list of keys, value rules. + + blob name: + key: 'name', value type: str + premium blob tier: + key: 'blob_tier', value type: PremiumPageBlobTier + lease: + key: 'lease_id', value type: Union[str, LeaseClient] + timeout for subrequest: + key: 'timeout', value type: int + + :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword bool raise_on_any_failure: + This is a boolean param which defaults to True. When this is set, an exception + is raised even if there is a single operation failure. + :return: An iterator of responses, one for each blob in order + :rtype: iterator[~azure.core.pipeline.transport.HttpResponse] + """ + reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) + + return self._batch_send(*reqs, **options) + + def get_blob_client( + self, blob, # type: Union[str, BlobProperties] + snapshot=None # type: str + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param blob: + The blob with which to interact. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`~BlobClient.create_snapshot()`. + :returns: A BlobClient. + :rtype: ~azure.storage.blob.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START get_blob_client] + :end-before: [END get_blob_client] + :language: python + :dedent: 8 + :caption: Get the blob client. + """ + blob_name = _get_blob_name(blob) + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( + self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_deserialize.py b/azure/multiapi/storagev2/blob/v2020_06_12/_deserialize.py new file mode 100644 index 0000000..dff3953 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_deserialize.py @@ -0,0 +1,166 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use +from typing import ( # pylint: disable=unused-import + Tuple, Dict, List, + TYPE_CHECKING +) + +from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties +from ._shared.models import get_enum_value + +from ._shared.response_handlers import deserialize_metadata +from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \ + StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule + +if TYPE_CHECKING: + from ._generated.models import PageList + + +def deserialize_pipeline_response_into_cls(cls_method, response, obj, headers): + try: + deserialized_response = response.http_response + except AttributeError: + deserialized_response = response + return cls_method(deserialized_response, obj, headers) + + +def deserialize_blob_properties(response, obj, headers): + blob_properties = BlobProperties( + metadata=deserialize_metadata(response, obj, headers), + object_replication_source_properties=deserialize_ors_policies(response.http_response.headers), + **headers + ) + if 'Content-Range' in headers: + if 'x-ms-blob-content-md5' in headers: + blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] + else: + blob_properties.content_settings.content_md5 = None + return blob_properties + + +def deserialize_ors_policies(policy_dictionary): + + if policy_dictionary is None: + return None + # For source blobs (blobs that have policy ids and rule ids applied to them), + # the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}". + # The value of this header is the status of the replication. + or_policy_status_headers = {key: val for key, val in policy_dictionary.items() + if 'or-' in key and key != 'x-ms-or-policy-id'} + + parsed_result = {} + + for key, val in or_policy_status_headers.items(): + # list blobs gives or-<policy_id>_<rule_id> and get blob properties gives x-ms-or-<policy_id>_<rule_id> + policy_and_rule_ids = key.split('or-')[1].split('_') + policy_id = policy_and_rule_ids[0] + rule_id = policy_and_rule_ids[1] + + # If we are seeing this policy for the first time, create a new list to store rule_id -> result + parsed_result[policy_id] = parsed_result.get(policy_id) or list() + parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val)) + + result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()] + + return result_list + + +def deserialize_blob_stream(response, obj, headers): + blob_properties = deserialize_blob_properties(response, obj, headers) + obj.properties = blob_properties + return response.http_response.location_mode, obj + + +def deserialize_container_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + container_properties = ContainerProperties( + metadata=metadata, + **headers + ) + return container_properties + + +def get_page_ranges_result(ranges): + # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + page_range = [] # type: ignore + clear_range = [] # type: List + if ranges.page_range: + page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore + if ranges.clear_range: + clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range] + return page_range, clear_range # type: ignore + + +def service_stats_deserialize(generated): + """Deserialize a ServiceStats object into a dict.
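A worked illustration of the object-replication parsing above, assuming headers shaped as the comment describes::

    headers = {
        'x-ms-or-policyA_rule1': 'Complete',
        'x-ms-or-policyA_rule2': 'Failed',
        'x-ms-or-policy-id': 'policyA',   # excluded by the filter
    }
    # deserialize_ors_policies(headers) returns one ObjectReplicationPolicy
    # (policy_id='policyA') holding two ObjectReplicationRule entries:
    # rule1 -> 'Complete', rule2 -> 'Failed'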
+ """ + return { + 'geo_replication': { + 'status': generated.geo_replication.status, + 'last_sync_time': generated.geo_replication.last_sync_time, + } + } + + +def service_properties_deserialize(generated): + """Deserialize a ServiceProperties objects into a dict. + """ + return { + 'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access + 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access + 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access + 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access + 'target_version': generated.default_service_version, # pylint: disable=protected-access + 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access + 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access + } + + +def get_blob_properties_from_generated_code(generated): + blob = BlobProperties() + blob.name = generated.name + blob_type = get_enum_value(generated.properties.blob_type) + blob.blob_type = BlobType(blob_type) if blob_type else None + blob.etag = generated.properties.etag + blob.deleted = generated.deleted + blob.snapshot = generated.snapshot + blob.is_append_blob_sealed = generated.properties.is_sealed + blob.metadata = generated.metadata.additional_properties if generated.metadata else {} + blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None + blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access + blob.last_modified = generated.properties.last_modified + blob.creation_time = generated.properties.creation_time + blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access + blob.size = generated.properties.content_length + blob.page_blob_sequence_number = generated.properties.blob_sequence_number + blob.server_encrypted = generated.properties.server_encrypted + blob.encryption_scope = generated.properties.encryption_scope + blob.deleted_time = generated.properties.deleted_time + blob.remaining_retention_days = generated.properties.remaining_retention_days + blob.blob_tier = generated.properties.access_tier + blob.rehydrate_priority = generated.properties.rehydrate_priority + blob.blob_tier_inferred = generated.properties.access_tier_inferred + blob.archive_status = generated.properties.archive_status + blob.blob_tier_change_time = generated.properties.access_tier_change_time + blob.version_id = generated.version_id + blob.is_current_version = generated.is_current_version + blob.tag_count = generated.properties.tag_count + blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access + blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata) + blob.last_accessed_on = generated.properties.last_accessed_on + return blob + + +def parse_tags(generated_tags): + # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None] + """Deserialize a list of BlobTag objects into a dict. 
+ """ + if generated_tags: + tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set} + return tag_dict + return None diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_download.py b/azure/multiapi/storagev2/blob/v2020_06_12/_download.py new file mode 100644 index 0000000..d17a211 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_download.py @@ -0,0 +1,638 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys +import threading +import time + +import warnings +from io import BytesIO +from typing import Iterator + +import requests +from azure.core.exceptions import HttpResponseError, ServiceResponseError + +from azure.core.tracing.common import with_current_context +from ._shared.encryption import decrypt_blob +from ._shared.request_handlers import validate_and_format_range_headers +from ._shared.response_handlers import process_storage_error, parse_length_from_content_range +from ._deserialize import get_page_ranges_result + + +def process_range_and_offset(start_range, end_range, length, encryption): + start_offset, end_offset = 0, 0 + if encryption.get("key") is not None or encryption.get("resolver") is not None: + if start_range is not None: + # Align the start of the range along a 16 byte block + start_offset = start_range % 16 + start_range -= start_offset + + # Include an extra 16 bytes for the IV if necessary + # Because of the previous offsetting, start_range will always + # be a multiple of 16. + if start_range > 0: + start_offset += 16 + start_range -= 16 + + if length is not None: + # Align the end of the range along a 16 byte block + end_offset = 15 - (end_range % 16) + end_range += end_offset + + return (start_range, end_range), (start_offset, end_offset) + + +def process_content(data, start_offset, end_offset, encryption): + if data is None: + raise ValueError("Response cannot be None.") + + content = b"".join(list(data)) + + if content and encryption.get("key") is not None or encryption.get("resolver") is not None: + try: + return decrypt_blob( + encryption.get("required"), + encryption.get("key"), + encryption.get("resolver"), + content, + start_offset, + end_offset, + data.response.headers, + ) + except Exception as error: + raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) + return content + + +class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + client=None, + non_empty_ranges=None, + total_size=None, + chunk_size=None, + current_progress=None, + start_range=None, + end_range=None, + stream=None, + parallel=None, + validate_content=None, + encryption_options=None, + **kwargs + ): + self.client = client + self.non_empty_ranges = non_empty_ranges + + # Information on the download range/chunk size + self.chunk_size = chunk_size + self.total_size = total_size + self.start_index = start_range + self.end_index = end_range + + # The destination that we will write to + self.stream = stream + self.stream_lock = threading.Lock() if parallel else None + self.progress_lock = threading.Lock() if parallel else None + + # For a parallel download, the stream is always seekable, so we note down the current position + # in order to seek to the right place when out-of-order chunks come in + 
self.stream_start = stream.tell() if parallel else None + + # Download progress so far + self.progress_total = current_progress + + # Encryption + self.encryption_options = encryption_options + + # Parameters for each get operation + self.validate_content = validate_content + self.request_options = kwargs + + def _calculate_range(self, chunk_start): + if chunk_start + self.chunk_size > self.end_index: + chunk_end = self.end_index + else: + chunk_end = chunk_start + self.chunk_size + return chunk_start, chunk_end + + def get_chunk_offsets(self): + index = self.start_index + while index < self.end_index: + yield index + index += self.chunk_size + + def process_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + chunk_data = self._download_chunk(chunk_start, chunk_end - 1) + length = chunk_end - chunk_start + if length > 0: + self._write_to_stream(chunk_data, chunk_start) + self._update_progress(length) + + def yield_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + return self._download_chunk(chunk_start, chunk_end - 1) + + def _update_progress(self, length): + if self.progress_lock: + with self.progress_lock: # pylint: disable=not-context-manager + self.progress_total += length + else: + self.progress_total += length + + def _write_to_stream(self, chunk_data, chunk_start): + if self.stream_lock: + with self.stream_lock: # pylint: disable=not-context-manager + self.stream.seek(self.stream_start + (chunk_start - self.start_index)) + self.stream.write(chunk_data) + else: + self.stream.write(chunk_data) + + def _do_optimize(self, given_range_start, given_range_end): + # If we have no page range list stored, then assume there's data everywhere for that page blob + # or it's a block blob or append blob + if self.non_empty_ranges is None: + return False + + for source_range in self.non_empty_ranges: + # Case 1: As the range list is sorted, if we've reached such a source_range + # we've checked all the appropriate source_range already and haven't found any overlapping. + # so the given range doesn't have any data and download optimization could be applied. + # given range: | | + # source range: | | + if given_range_end < source_range['start']: # pylint:disable=no-else-return + return True + # Case 2: the given range comes after source_range, continue checking. + # given range: | | + # source range: | | + elif source_range['end'] < given_range_start: + pass + # Case 3: source_range and given range overlap somehow, no need to optimize. + else: + return False + # Went through all src_ranges, but nothing overlapped. Optimization will be applied. + return True + + def _download_chunk(self, chunk_start, chunk_end): + download_range, offset = process_range_and_offset( + chunk_start, chunk_end, chunk_end, self.encryption_options + ) + + # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. + # Do optimize and create empty chunk locally if condition is met. 
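    # Worked example with hypothetical ranges (not part of the original module):
    # with non_empty_ranges = [{'start': 0, 'end': 511}, {'start': 4096, 'end': 8191}]
    #   _do_optimize(1024, 2047) -> True   (entirely inside a cleared gap)
    #   _do_optimize(256, 1023)  -> False  (overlaps the first written range)
    #   _do_optimize(9000, 9999) -> True   (past every written range)
    # A True result below materializes the chunk locally as zero bytes
    # instead of issuing a GET to the service.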
+ if self._do_optimize(download_range[0], download_range[1]): + chunk_data = b"\x00" * self.chunk_size + else: + range_header, range_validation = validate_and_format_range_headers( + download_range[0], + download_range[1], + check_content_md5=self.validate_content + ) + + retry_active = True + retry_total = 3 + while retry_active: + try: + _, response = self.client.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self.validate_content, + data_stream_total=self.total_size, + download_stream_current=self.progress_total, + **self.request_options + ) + except HttpResponseError as error: + process_storage_error(error) + + try: + chunk_data = process_content(response, offset[0], offset[1], self.encryption_options) + retry_active = False + except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError) as error: + retry_total -= 1 + if retry_total <= 0: + raise ServiceResponseError(error, error=error) + time.sleep(1) + + # This makes sure that if_match is set so that we can validate + # that subsequent downloads are to an unmodified blob + if self.request_options.get("modified_access_conditions"): + self.request_options["modified_access_conditions"].if_match = response.properties.etag + + return chunk_data + + +class _ChunkIterator(object): + """Iterator for chunks in blob download stream.""" + + def __init__(self, size, content, downloader, chunk_size): + self.size = size + self._chunk_size = chunk_size + self._current_content = content + self._iter_downloader = downloader + self._iter_chunks = None + self._complete = (size == 0) + + def __len__(self): + return self.size + + def __iter__(self): + return self + + def __next__(self): + """Iterate through responses.""" + if self._complete: + raise StopIteration("Download complete") + if not self._iter_downloader: + # cut the data obtained from initial GET into chunks + if len(self._current_content) > self._chunk_size: + return self._get_chunk_data() + self._complete = True + return self._current_content + + if not self._iter_chunks: + self._iter_chunks = self._iter_downloader.get_chunk_offsets() + + # initial GET result still has more than _chunk_size bytes of data + if len(self._current_content) >= self._chunk_size: + return self._get_chunk_data() + + try: + chunk = next(self._iter_chunks) + self._current_content += self._iter_downloader.yield_chunk(chunk) + except StopIteration as e: + self._complete = True + if self._current_content: + return self._current_content + raise e + + # the current content from the first get is still there but smaller than chunk size + # therefore we want to make sure it's also included + return self._get_chunk_data() + + next = __next__ # Python 2 compatibility. + + def _get_chunk_data(self): + chunk_data = self._current_content[: self._chunk_size] + self._current_content = self._current_content[self._chunk_size:] + return chunk_data + + +class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes + """A streaming object to download from Azure Storage. + + :ivar str name: + The name of the blob being downloaded. + :ivar str container: + The name of the container where the blob is. + :ivar ~azure.storage.blob.BlobProperties properties: + The properties of the blob being downloaded. If only a range of the data is being + downloaded, this will be reflected in the properties. + :ivar int size: + The size of the total data in the stream. This will be the byte range if specified, + otherwise the total size of the blob.
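For example (hedged sketch; the ``container`` client is a placeholder), a ranged download reports the size of the requested range rather than of the whole blob::

    downloader = container.download_blob('big.bin', offset=1024, length=4096)
    assert downloader.size == 4096    # the requested byte range
    data = downloader.readall()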
+ """ + + def __init__( + self, + clients=None, + config=None, + start_range=None, + end_range=None, + validate_content=None, + encryption_options=None, + max_concurrency=1, + name=None, + container=None, + encoding=None, + **kwargs + ): + self.name = name + self.container = container + self.properties = None + self.size = None + + self._clients = clients + self._config = config + self._start_range = start_range + self._end_range = end_range + self._max_concurrency = max_concurrency + self._encoding = encoding + self._validate_content = validate_content + self._encryption_options = encryption_options or {} + self._request_options = kwargs + self._location_mode = None + self._download_complete = False + self._current_content = None + self._file_size = None + self._non_empty_ranges = None + self._response = None + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. + self._first_get_size = ( + self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size + ) + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, initial_request_end, self._end_range, self._encryption_options + ) + + self._response = self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.container = self.container + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = "bytes {0}-{1}/{2}".format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + def __len__(self): + return self.size + + def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content + ) + + retry_active = True + retry_total = 3 + while retry_active: + try: + location_mode, response = self._clients.blob.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options + ) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. 
+                self._location_mode = location_mode
+
+                # Parse the total file size and adjust the download size if ranges
+                # were specified
+                self._file_size = parse_length_from_content_range(response.properties.content_range)
+                if self._end_range is not None:
+                    # Use the end range index unless it is over the end of the file
+                    self.size = min(self._file_size, self._end_range - self._start_range + 1)
+                elif self._start_range is not None:
+                    self.size = self._file_size - self._start_range
+                else:
+                    self.size = self._file_size
+
+            except HttpResponseError as error:
+                if self._start_range is None and error.response.status_code == 416:
+                    # Get range will fail on an empty file. If the user did not
+                    # request a range, do a regular get request in order to get
+                    # any properties.
+                    try:
+                        _, response = self._clients.blob.download(
+                            validate_content=self._validate_content,
+                            data_stream_total=0,
+                            download_stream_current=0,
+                            **self._request_options
+                        )
+                    except HttpResponseError as error:
+                        process_storage_error(error)
+
+                    # Set the download size to empty
+                    self.size = 0
+                    self._file_size = 0
+                else:
+                    process_storage_error(error)
+
+            try:
+                if self.size == 0:
+                    self._current_content = b""
+                else:
+                    self._current_content = process_content(
+                        response,
+                        self._initial_offset[0],
+                        self._initial_offset[1],
+                        self._encryption_options
+                    )
+                retry_active = False
+            except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError) as error:
+                retry_total -= 1
+                if retry_total <= 0:
+                    raise ServiceResponseError(error, error=error)
+                time.sleep(1)
+
+        # get page ranges to optimize downloading sparse page blob
+        if response.properties.blob_type == 'PageBlob':
+            try:
+                page_ranges = self._clients.page_blob.get_page_ranges()
+                self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
+            # according to the REST API documentation:
+            # in a highly fragmented page blob with a large number of writes,
+            # a Get Page Ranges request can fail due to an internal server timeout.
+            # thus, if the page blob is not sparse, it's ok for it to fail
+            except HttpResponseError:
+                pass
+
+        # If the file is small, the download is complete at this point.
+        # If file size is large, download the rest of the file in chunks.
+        if response.properties.size != self.size:
+            # Lock on the etag. This can be overridden by the user by specifying '*'
+            if self._request_options.get("modified_access_conditions"):
+                if not self._request_options["modified_access_conditions"].if_match:
+                    self._request_options["modified_access_conditions"].if_match = response.properties.etag
+        else:
+            self._download_complete = True
+        return response
+
+    def chunks(self):
+        # type: () -> Iterator[bytes]
+        """Iterate over chunks in the download stream.
+
+        :rtype: Iterator[bytes]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world.py
+                :start-after: [START download_a_blob_in_chunk]
+                :end-before: [END download_a_blob_in_chunk]
+                :language: python
+                :dedent: 12
+                :caption: Download a blob using chunks().
+ """ + if self.size == 0 or self._download_complete: + iter_downloader = None + else: + data_end = self._file_size + if self._end_range is not None: + # Use the end range index unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + iter_downloader = _ChunkDownloader( + client=self._clients.blob, + non_empty_ranges=self._non_empty_ranges, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # start where the first download ended + end_range=data_end, + stream=None, + parallel=False, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + **self._request_options + ) + return _ChunkIterator( + size=self.size, + content=self._current_content, + downloader=iter_downloader, + chunk_size=self._config.max_chunk_get_size) + + def readall(self): + """Download the contents of this blob. + + This operation is blocking until all data is downloaded. + + :rtype: bytes or str + """ + stream = BytesIO() + self.readinto(stream) + data = stream.getvalue() + if self._encoding: + return data.decode(self._encoding) + return data + + def content_as_bytes(self, max_concurrency=1): + """Download the contents of this file. + + This operation is blocking until all data is downloaded. + + :keyword int max_concurrency: + The number of parallel connections with which to download. + :rtype: bytes + """ + warnings.warn( + "content_as_bytes is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + return self.readall() + + def content_as_text(self, max_concurrency=1, encoding="UTF-8"): + """Download the contents of this blob, and decode as text. + + This operation is blocking until all data is downloaded. + + :keyword int max_concurrency: + The number of parallel connections with which to download. + :param str encoding: + Test encoding to decode the downloaded bytes. Default is UTF-8. + :rtype: str + """ + warnings.warn( + "content_as_text is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self._encoding = encoding + return self.readall() + + def readinto(self, stream): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + # The stream must be seekable if parallel download is required + parallel = self._max_concurrency > 1 + if parallel: + error_message = "Target stream handle must be seekable." 
+ if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(error_message) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(error_message) + + # Write the content to the user stream + stream.write(self._current_content) + if self._download_complete: + return self.size + + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + downloader = _ChunkDownloader( + client=self._clients.blob, + non_empty_ranges=self._non_empty_ranges, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # Start where the first download ended + end_range=data_end, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + **self._request_options + ) + if parallel: + import concurrent.futures + with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor: + list(executor.map( + with_current_context(downloader.process_chunk), + downloader.get_chunk_offsets() + )) + else: + for chunk in downloader.get_chunk_offsets(): + downloader.process_chunk(chunk) + return self.size + + def download_to_stream(self, stream, max_concurrency=1): + """Download the contents of this blob to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The properties of the downloaded blob. + :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self.readinto(stream) + return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/__init__.py new file mode 100644 index 0000000..cc760e7 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/__init__.py @@ -0,0 +1,16 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._azure_blob_storage import AzureBlobStorage +__all__ = ['AzureBlobStorage'] + +try: + from ._patch import patch_sdk # type: ignore + patch_sdk() +except ImportError: + pass diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_azure_blob_storage.py new file mode 100644 index 0000000..dff7e12 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_azure_blob_storage.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import TYPE_CHECKING
+
+from azure.core import PipelineClient
+from msrest import Deserializer, Serializer
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any
+
+from ._configuration import AzureBlobStorageConfiguration
+from .operations import ServiceOperations
+from .operations import ContainerOperations
+from .operations import DirectoryOperations
+from .operations import BlobOperations
+from .operations import PageBlobOperations
+from .operations import AppendBlobOperations
+from .operations import BlockBlobOperations
+from . import models
+
+
+class AzureBlobStorage(object):
+    """AzureBlobStorage.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.blob.operations.ServiceOperations
+    :ivar container: ContainerOperations operations
+    :vartype container: azure.storage.blob.operations.ContainerOperations
+    :ivar directory: DirectoryOperations operations
+    :vartype directory: azure.storage.blob.operations.DirectoryOperations
+    :ivar blob: BlobOperations operations
+    :vartype blob: azure.storage.blob.operations.BlobOperations
+    :ivar page_blob: PageBlobOperations operations
+    :vartype page_blob: azure.storage.blob.operations.PageBlobOperations
+    :ivar append_blob: AppendBlobOperations operations
+    :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations
+    :ivar block_blob: BlockBlobOperations operations
+    :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+    :type url: str
+    """
+
+    def __init__(
+        self,
+        url,  # type: str
+        **kwargs  # type: Any
+    ):
+        # type: (...)
-> None
+        base_url = '{url}'
+        self._config = AzureBlobStorageConfiguration(url, **kwargs)
+        self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._serialize.client_side_validation = False
+        self._deserialize = Deserializer(client_models)
+
+        self.service = ServiceOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.container = ContainerOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.directory = DirectoryOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.blob = BlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.page_blob = PageBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.append_blob = AppendBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.block_blob = BlockBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    def close(self):
+        # type: () -> None
+        self._client.close()
+
+    def __enter__(self):
+        # type: () -> AzureBlobStorage
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *exc_details):
+        # type: (Any) -> None
+        self._client.__exit__(*exc_details)
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_configuration.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_configuration.py
new file mode 100644
index 0000000..fb74f9e
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/_configuration.py
@@ -0,0 +1,58 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import TYPE_CHECKING
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any
+
+VERSION = "unknown"
+
+class AzureBlobStorageConfiguration(Configuration):
+    """Configuration for AzureBlobStorage.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+    :type url: str
+    """
+
+    def __init__(
+        self,
+        url,  # type: str
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+        super(AzureBlobStorageConfiguration, self).__init__(**kwargs)
+
+        self.url = url
+        self.version = "2020-06-12"
+        kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION))
+        self._configure(**kwargs)
+
+    def _configure(
+        self,
+        **kwargs  # type: Any
+    ):
+        # type: (...)
-> None + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/__init__.py new file mode 100644 index 0000000..12cfcf6 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._azure_blob_storage import AzureBlobStorage +__all__ = ['AzureBlobStorage'] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_azure_blob_storage.py new file mode 100644 index 0000000..b537034 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_azure_blob_storage.py @@ -0,0 +1,83 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core import AsyncPipelineClient +from msrest import Deserializer, Serializer + +from ._configuration import AzureBlobStorageConfiguration +from .operations import ServiceOperations +from .operations import ContainerOperations +from .operations import DirectoryOperations +from .operations import BlobOperations +from .operations import PageBlobOperations +from .operations import AppendBlobOperations +from .operations import BlockBlobOperations +from .. import models + + +class AzureBlobStorage(object): + """AzureBlobStorage. 
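+
+    Async variant of the generated client: it exposes the same operation
+    groups but runs requests through an AsyncPipelineClient, so it should be
+    closed with ``await close()`` or used as an async context manager.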
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.blob.aio.operations.ServiceOperations
+    :ivar container: ContainerOperations operations
+    :vartype container: azure.storage.blob.aio.operations.ContainerOperations
+    :ivar directory: DirectoryOperations operations
+    :vartype directory: azure.storage.blob.aio.operations.DirectoryOperations
+    :ivar blob: BlobOperations operations
+    :vartype blob: azure.storage.blob.aio.operations.BlobOperations
+    :ivar page_blob: PageBlobOperations operations
+    :vartype page_blob: azure.storage.blob.aio.operations.PageBlobOperations
+    :ivar append_blob: AppendBlobOperations operations
+    :vartype append_blob: azure.storage.blob.aio.operations.AppendBlobOperations
+    :ivar block_blob: BlockBlobOperations operations
+    :vartype block_blob: azure.storage.blob.aio.operations.BlockBlobOperations
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+    :type url: str
+    """
+
+    def __init__(
+        self,
+        url: str,
+        **kwargs: Any
+    ) -> None:
+        base_url = '{url}'
+        self._config = AzureBlobStorageConfiguration(url, **kwargs)
+        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._serialize.client_side_validation = False
+        self._deserialize = Deserializer(client_models)
+
+        self.service = ServiceOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.container = ContainerOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.directory = DirectoryOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.blob = BlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.page_blob = PageBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.append_blob = AppendBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.block_blob = BlockBlobOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    async def close(self) -> None:
+        await self._client.close()
+
+    async def __aenter__(self) -> "AzureBlobStorage":
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details) -> None:
+        await self._client.__aexit__(*exc_details)
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_configuration.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_configuration.py
new file mode 100644
index 0000000..1924efa
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/_configuration.py
@@ -0,0 +1,52 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+VERSION = "unknown"
+
+class AzureBlobStorageConfiguration(Configuration):
+    """Configuration for AzureBlobStorage.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+    :type url: str
+    """
+
+    def __init__(
+        self,
+        url: str,
+        **kwargs: Any
+    ) -> None:
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+        super(AzureBlobStorageConfiguration, self).__init__(**kwargs)
+
+        self.url = url
+        self.version = "2020-06-12"
+        kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION))
+        self._configure(**kwargs)
+
+    def _configure(
+        self,
+        **kwargs: Any
+    ) -> None:
+        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
+        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
+        self.authentication_policy = kwargs.get('authentication_policy')
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/__init__.py
new file mode 100644
index 0000000..62f85c9
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/__init__.py
@@ -0,0 +1,25 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations import ServiceOperations
+from ._container_operations import ContainerOperations
+from ._directory_operations import DirectoryOperations
+from ._blob_operations import BlobOperations
+from ._page_blob_operations import PageBlobOperations
+from ._append_blob_operations import AppendBlobOperations
+from ._block_blob_operations import BlockBlobOperations
+
+__all__ = [
+    'ServiceOperations',
+    'ContainerOperations',
+    'DirectoryOperations',
+    'BlobOperations',
+    'PageBlobOperations',
+    'AppendBlobOperations',
+    'BlockBlobOperations',
+]
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_append_blob_operations.py
new file mode 100644
index 0000000..934b720
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_append_blob_operations.py
@@ -0,0 +1,700 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class AppendBlobOperations: + """AppendBlobOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + content_length: int, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Create Append Blob operation creates a new append blob. + + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + blob_type = "AppendBlob" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + 
header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+        response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+        response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
+        response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    create.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    async def append_block(
+        self,
+        content_length: int,
+        body: IO,
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytearray] = None,
+        transactional_content_crc64: Optional[bytearray] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+        append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None,
+        cpk_info: Optional["_models.CpkInfo"] = None,
+        cpk_scope_info: Optional["_models.CpkScopeInfo"] = None,
+        modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+        **kwargs
+    ) -> None:
+        """The Append Block operation commits a new block of data to the end of an existing append blob.
+        The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to
+        AppendBlob. Append Block is supported only on version 2015-02-21 or later.
+
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param body: Initial data.
+        :type body: IO
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service.
+        :type transactional_content_md5: bytearray
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service.
+        :type transactional_content_crc64: bytearray
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param append_position_access_conditions: Parameter group.
+        :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions
+        :param cpk_info: Parameter group.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _max_size = None + _append_position = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if append_position_access_conditions is not None: + _max_size = append_position_access_conditions.max_size + _append_position = append_position_access_conditions.append_position + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "appendblock" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.append_block.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = 
self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', 
response.headers.get('x-ms-encryption-scope'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    append_block.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    async def append_block_from_url(
+        self,
+        source_url: str,
+        content_length: int,
+        source_range: Optional[str] = None,
+        source_content_md5: Optional[bytearray] = None,
+        source_contentcrc64: Optional[bytearray] = None,
+        timeout: Optional[int] = None,
+        transactional_content_md5: Optional[bytearray] = None,
+        request_id_parameter: Optional[str] = None,
+        cpk_info: Optional["_models.CpkInfo"] = None,
+        cpk_scope_info: Optional["_models.CpkScopeInfo"] = None,
+        lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+        append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None,
+        modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+        source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None,
+        **kwargs
+    ) -> None:
+        """The Append Block operation commits a new block of data to the end of an existing append blob
+        where the contents are read from a source url. The Append Block operation is permitted only if
+        the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on
+        version 2015-02-21 or later.
+
+        :param source_url: Specify a URL to the copy source.
+        :type source_url: str
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param source_range: Bytes of source data in the specified range.
+        :type source_range: str
+        :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+         from the copy source.
+        :type source_content_md5: bytearray
+        :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be
+         read from the copy source.
+        :type source_contentcrc64: bytearray
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service.
+        :type transactional_content_md5: bytearray
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param cpk_info: Parameter group.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param append_position_access_conditions: Parameter group.
+        :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions
+        :param modified_access_conditions: Parameter group.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group.
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _max_size = None + _append_position = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if append_position_access_conditions is not None: + _max_size = append_position_access_conditions.max_size + _append_position = append_position_access_conditions.append_position + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "appendblock" + accept = "application/xml" + + # Construct URL + url = self.append_block_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + 
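+        # The service reads the block data from source_url itself, so this
+        # request is sent without a body; the x-ms-source-content-md5 and
+        # x-ms-source-content-crc64 headers above validate the bytes the
+        # service reads from the copy source.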
if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, response_headers) + + append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def seal( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, + **kwargs + ) -> None: + """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on + version 2019-12-12 or later. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param append_position_access_conditions: Parameter group. 
+ :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _append_position = None + if append_position_access_conditions is not None: + _append_position = append_position_access_conditions.append_position + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + comp = "seal" + accept = "application/xml" + + # Construct URL + url = self.seal.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + + if cls: + return cls(pipeline_response, None, response_headers) + + seal.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_blob_operations.py new file mode 100644 index 0000000..0f296df --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_blob_operations.py @@ -0,0 +1,3122 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class BlobOperations: + """BlobOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
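+ + A minimal illustrative sketch (the client wiring shown here is an assumption for illustration; this operation group is normally reached as the ``blob`` attribute of the generated async ``AzureBlobStorage`` client):: + + # client = AzureBlobStorage(url=blob_url) # assumed wiring, not part of this module + # stream = await client.blob.download(range='bytes=0-1023')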
+ """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def download( + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + range_get_content_md5: Optional[bool] = None, + range_get_content_crc64: Optional[bool] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> IO: + """The Download operation reads or downloads a blob from the system, including its metadata and + properties. You can also call Download to read a snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param range_get_content_md5: When set to true and specified together with the Range, the + service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB + in size. + :type range_get_content_md5: bool + :param range_get_content_crc64: When set to true and specified together with the Range, the + service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 + MB in size. + :type range_get_content_crc64: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.download.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if range_get_content_crc64 is not None: + header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + 
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + 
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + 
response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def get_properties( + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and 
+ system properties for the blob. It does not return the content of the blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] 
= self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) + response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')) + response_headers['x-ms-lease-duration']=self._deserialize('str', 
response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) + response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) + response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) + response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) + + if cls: + return cls(pipeline_response, None, response_headers) + + 
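# The metadata attribute below stores the URL template that the request builder reads back via self.get_properties.metadata['url']. + 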
get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def delete( + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, + request_id_parameter: Optional[str] = None, + blob_delete_type: Optional[str] = "Permanent", + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """If the storage account's soft delete feature is disabled, then, when a blob is deleted, it is + permanently removed from the storage account. If the storage account's soft delete feature is + enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible + immediately. However, the blob service retains the blob or snapshot for the number of days + specified by the DeleteRetentionPolicy section of [Storage service properties](Set-Blob-Service-Properties.md). + After the specified number of days has passed, the blob's data is + permanently removed from the storage account. Note that you continue to be charged for the + soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify + the "include=deleted" query parameter to discover which blobs and snapshots have been soft + deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other + operations on a soft-deleted blob or snapshot cause the service to return an HTTP status code + of 404 (ResourceNotFound). + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the + following two options: include: Delete the base blob and all of its snapshots. only: Delete + only the blob's snapshots and not the blob itself. + :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to + permanently delete a blob if blob soft delete is enabled. + :type blob_delete_type: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if blob_delete_type is not None: + query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in 
[202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_access_control( + self, + timeout: Optional[int] = None, + owner: Optional[str] = None, + group: Optional[str] = None, + posix_permissions: Optional[str] = None, + posix_acl: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Set the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and directories. The value is a + comma-separated list of access control entries. Each access control entry (ACE) consists of a + scope, a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "setAccessControl" + accept = "application/xml" + + # Construct URL + url = self.set_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def get_access_control( + self, + timeout: Optional[int] = None, + upn: Optional[bool] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Get the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If + "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response + headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If + "false", the values will be returned as Azure Active Directory Object IDs. The default value is + false. + :type upn: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "getAccessControl" + accept = "application/xml" + + # Construct URL + url = self.get_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) + response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) + response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) + response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) + response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def rename( + self, + rename_source: str, + timeout: Optional[int] = None, + path_rename_mode: Optional[Union[str, "_models.PathRenameMode"]] = None, + directory_properties: Optional[str] = None, + posix_permissions: Optional[str] = None, + posix_umask: Optional[str] = None, + source_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Rename a blob/file. By default, the destination is overwritten, and if the destination already + exists and has a lease, the lease is broken. This operation supports conditional HTTP requests. + For more information, see `Specifying Conditional Headers for Blob Service Operations + <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_. To fail if the destination already exists, use a conditional + request with If-None-Match: "*". + + :param rename_source: The file or directory to be renamed. The value must have the following + format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties will + overwrite the existing properties; otherwise, the existing properties will be preserved. + :type rename_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param path_rename_mode: Determines the behavior of the rename operation. + :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode + :param directory_properties: Optional. User-defined properties to be stored with the file or + directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", + where each value is base64 encoded. + :type directory_properties: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask + restricts permission settings for file and directory, and will only be applied when a default ACL + does not exist in the parent directory. If a umask bit is set, it means that the corresponding + permission will be disabled. 
Otherwise the corresponding permission will be determined by the + permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, + a default umask - 0027 will be used. + :type posix_umask: str + :param source_lease_id: A lease ID for the source path. If specified, the source path must have + an active lease and the lease ID must match. + :type source_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param directory_http_headers: Parameter group. + :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cache_control = None + _content_type = None + _content_encoding = None + _content_language = None + _content_disposition = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if directory_http_headers is not None: + _cache_control = directory_http_headers.cache_control + _content_type = directory_http_headers.content_type + _content_encoding = directory_http_headers.content_encoding + _content_language = directory_http_headers.content_language + _content_disposition = directory_http_headers.content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + accept = "application/xml" + + # Construct URL + url = self.rename.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", 
timeout, 'int', minimum=0) + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, 
**kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def undelete( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> None: + """Undelete a blob that was previously soft deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "undelete" + accept = "application/xml" + + # Construct URL + url = self.undelete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, 
response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + undelete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_expiry( + self, + expiry_options: Union[str, "_models.BlobExpiryOptions"], + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + expires_on: Optional[str] = None, + **kwargs + ) -> None: + """Sets the time a blob will expire and be deleted. + + :param expiry_options: Required. Indicates mode of the expiry time. + :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param expires_on: The time to set the blob to expiry. + :type expires_on: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "expiry" + accept = "application/xml" + + # Construct URL + url = self.set_expiry.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') + if expires_on is not None: + header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_http_headers( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Set HTTP Headers operation sets system properties on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
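+
+ .. admonition:: Example (editor's sketch, not generated code)
+
+ Assuming ``ops`` is an already-constructed instance of this async
+ operations class, refreshing a blob's system properties might look
+ like::
+
+ await ops.set_http_headers(
+ blob_http_headers=_models.BlobHTTPHeaders(
+ blob_content_type="application/json",
+ blob_cache_control="max-age=3600",
+ )
+ )
+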
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_cache_control = None + _blob_content_type = None + _blob_content_md5 = None + _blob_content_encoding = None + _blob_content_language = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _blob_content_disposition = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_disposition = blob_http_headers.blob_content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.set_http_headers.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_metadata( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or + more name-value pairs. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + 
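+ # (editor's note) Customer-provided-key (CPK) requests must send the key,
+ # its SHA-256 hash, and the algorithm (AES256) together; the three
+ # x-ms-encryption-* headers here are unpacked from the CpkInfo parameter
+ # group above, and the service rejects a request that supplies only some
+ # of them.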
header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def acquire_lease( + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "acquire" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + 
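+ # (editor's note) ``if_tags`` carries a SQL-like predicate over the blob's
+ # tags (for example: "priority" = 'high'); when the predicate does not
+ # match, the service fails the request with 412 (Precondition Failed).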
header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def release_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
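+
+ .. admonition:: Example (editor's sketch, not generated code)
+
+ A minimal acquire/release round trip, assuming ``ops`` is a
+ constructed instance of this class; the lease ID is read from the
+ response headers via the ``cls`` hook::
+
+ headers = await ops.acquire_lease(
+ duration=15,
+ cls=lambda resp, body, hdrs: hdrs,
+ )
+ await ops.release_lease(lease_id=headers['x-ms-lease-id'])
+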
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "release" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + 
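+ # (editor's note) A successful release echoes the blob's current ETag and
+ # Last-Modified, so callers can refresh cached access conditions without a
+ # separate properties call.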
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def renew_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "renew" + accept = "application/xml" + + # Construct URL + url = self.renew_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", 
_if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def change_lease( + self, + lease_id: str, + proposed_lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
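+
+ .. admonition:: Example (editor's sketch, not generated code)
+
+ Rotating an active lease to a fresh GUID, assuming ``ops`` is a
+ constructed instance of this class and ``current_lease_id`` holds
+ the active lease's ID::
+
+ import uuid
+
+ await ops.change_lease(
+ lease_id=current_lease_id,
+ proposed_lease_id=str(uuid.uuid4()),
+ )
+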
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "change" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def break_lease( + self, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a fixed- + duration lease breaks after the remaining lease period elapses, and an infinite lease breaks + immediately. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
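+
+ .. admonition:: Example (editor's sketch, not generated code)
+
+ Breaking a lease with a 10-second grace period, assuming ``ops`` is
+ a constructed instance of this class; on success (202) the time
+ remaining on the break is returned in ``x-ms-lease-time``::
+
+ await ops.break_lease(break_period=10)
+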
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "break" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) + response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def create_snapshot( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + request_id_parameter: Optional[str] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs + ) -> None: + """The Create Snapshot operation creates a read-only snapshot of a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. 
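+
+ .. admonition:: Example (editor's sketch, not generated code)
+
+ Taking a snapshot and keeping the returned headers, assuming
+ ``ops`` is a constructed instance of this class; the service
+ identifies the new snapshot by the DateTime value it returns in the
+ ``x-ms-snapshot`` header::
+
+ headers = await ops.create_snapshot(
+ cls=lambda resp, body, hdrs: hdrs,
+ )
+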
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "snapshot" + accept = "application/xml" + + # Construct URL + url = self.create_snapshot.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if 
_lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ create_snapshot.metadata = {'url': '/{containerName}/{blob}'} # type: ignore
+
+ async def start_copy_from_url(
+ self,
+ copy_source: str,
+ timeout: Optional[int] = None,
+ metadata: Optional[str] = None,
+ tier: Optional[Union[str, "_models.AccessTierOptional"]] = None,
+ rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None,
+ request_id_parameter: Optional[str] = None,
+ blob_tags_string: Optional[str] = None,
+ seal_blob: Optional[bool] = None,
+ source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ **kwargs
+ ) -> None:
+ """The Start Copy From URL operation copies a blob or an internet resource to a new blob.
+
+ :param copy_source: Specifies the URL of the source blob or internet resource to copy. This
+ value may be a URL of up to 2 KB in length. The value should be URL-encoded as it
+ would appear in a request URI. The source blob must either be public or must be authenticated
+ via a shared access signature.
+ :type copy_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+ If no name-value pairs are specified, the operation will copy the metadata from the source blob
+ or file to the destination blob.
If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived + blob. + :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param seal_blob: Overrides the sealed state of the destination blob. Service version + 2019-12-12 and newer. + :type seal_blob: bool + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + accept = "application/xml" + + # Construct URL + url = self.start_copy_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + 
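+ # Start Copy From URL is identified by the 'x-ms-copy-source' request header
+ # rather than a 'comp' query parameter, so only the optional timeout is
+ # added to the query string below.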
query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _source_if_tags is not None: + header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if seal_blob is not None: + header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
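+ # The copy runs asynchronously on the service: 'x-ms-copy-id' (deserialized
+ # below) can be handed to abort_copy_from_url, and 'x-ms-copy-status' starts
+ # out as 'pending'. A caller might poll for completion, e.g. (a hypothetical
+ # sketch using the public BlobClient wrapper, not part of this module):
+ #   props = await blob_client.get_blob_properties()
+ #   finished = props.copy.status != 'pending'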
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id'))
+ response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore
+
+ async def copy_from_url(
+ self,
+ copy_source: str,
+ timeout: Optional[int] = None,
+ metadata: Optional[str] = None,
+ tier: Optional[Union[str, "_models.AccessTierOptional"]] = None,
+ request_id_parameter: Optional[str] = None,
+ source_content_md5: Optional[bytearray] = None,
+ blob_tags_string: Optional[str] = None,
+ source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ **kwargs
+ ) -> None:
+ """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not
+ return a response until the copy is complete.
+
+ :param copy_source: Specifies the URL of the source blob or internet resource to copy. This
+ value may be a URL of up to 2 KB in length. The value should be URL-encoded as it
+ would appear in a request URI. The source blob must either be public or must be authenticated
+ via a shared access signature.
+ :type copy_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+ If no name-value pairs are specified, the operation will copy the metadata from the source blob
+ or file to the destination blob. If one or more name-value pairs are specified, the destination
+ blob is created with the specified metadata, and metadata is not copied from the source blob or
+ file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+ rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+ information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+ from the copy source.
+ :type source_content_md5: bytearray
+ :param blob_tags_string: Optional. Used to set blob tags in various blob operations.
+ :type blob_tags_string: str
+ :param source_modified_access_conditions: Parameter group.
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + x_ms_requires_sync = "true" + accept = "application/xml" + + # Construct URL + url = self.copy_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _if_modified_since is not None: + 
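+ # Destination access conditions (If-Modified-Since etc.): when any of these
+ # headers is present, the service fails the synchronous copy with 412
+ # (Precondition Failed) rather than overwriting a destination blob that no
+ # longer matches.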
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + + if cls: + return cls(pipeline_response, None, response_headers) + + copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def abort_copy_from_url( + self, + copy_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs + ) -> None: + """The Abort Copy From URL operation 
aborts a pending Copy From URL operation, and leaves a + destination blob with zero length and full metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy + Blob operation. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "copy" + copy_action_abort_constant = "abort" + accept = "application/xml" + + # Construct URL + url = self.abort_copy_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_tier( + self, + tier: Union[str, "_models.AccessTierRequired"], + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a + premium storage account and on a block blob in a blob storage account (locally redundant + storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not + update the blob's ETag. + + :param tier: Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierRequired + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived + blob. + :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "tier" + accept = "application/xml" + + # Construct URL + url = self.set_tier.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if response.status_code == 202: + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
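+ # A 202 (rather than 200) means the tier change is still pending, e.g. an
+ # asynchronous rehydration of a blob from the Archive tier.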
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def get_account_info( + self, + **kwargs + ) -> None: + """Returns the sku name and account kind. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "account" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_account_info.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) + response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def query( + self, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + query_request: Optional["_models.QueryRequest"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> IO: + """The Query operation enables users to select/project on blob data by providing simple query + expressions. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. 
For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param query_request: the query request. + :type query_request: ~azure.storage.blob.models.QueryRequest + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "query" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.query.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + 
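+ # Customer-provided key (CPK): the key, its SHA-256 hash, and the algorithm
+ # below travel together; the service rejects requests that send only some of
+ # the three headers.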
header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if query_request is not None: + body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + 
response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + 
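+ # Note that, unlike the 200 branch above, a 206 (Partial Content) response
+ # also carries 'x-ms-content-crc64' (deserialized above) for the returned
+ # range.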
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def get_tags( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs + ) -> "_models.BlobTags": + """The Get Tags operation enables users to get the tags associated with a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. 
+ :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BlobTags, or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlobTags + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "tags" + accept = "application/xml" + + # Construct URL + url = self.get_tags.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + 
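+ # The response body is an XML <Tags> document; deserialize it into the
+ # BlobTags model, a collection of BlobTag name/value pairs.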
deserialized = self._deserialize('BlobTags', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_tags( + self, + timeout: Optional[int] = None, + version_id: Optional[str] = None, + transactional_content_md5: Optional[bytearray] = None, + transactional_content_crc64: Optional[bytearray] = None, + request_id_parameter: Optional[str] = None, + tags: Optional["_models.BlobTags"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs + ) -> None: + """The Set Tags operation enables users to set tags on a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param tags: Blob tags. + :type tags: ~azure.storage.blob.models.BlobTags + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "tags" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_tags.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if tags is not None: + body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
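+        # Set Tags succeeds with 204 No Content, so only correlation headers are
+        # deserialized here. A hedged sketch of the round trip through the public
+        # wrapper (set_blob_tags/get_blob_tags are assumed from the package API,
+        # not defined in this generated module):
+        #
+        #     await client.set_blob_tags({"project": "alpha", "stage": "draft"})
+        #     assert (await client.get_blob_tags())["project"] == "alpha"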
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_block_blob_operations.py new file mode 100644 index 0000000..e90eacd --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_block_blob_operations.py @@ -0,0 +1,1088 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class BlockBlobOperations: + """BlockBlobOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def upload( + self, + content_length: int, + body: IO, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytearray] = None, + metadata: Optional[str] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Upload Block Blob operation updates the content of an existing block blob. Updating an + existing block blob overwrites any existing metadata on the blob. Partial updates are not + supported with Put Blob; the content of the existing blob is overwritten with the content of + the new blob. 
To perform a partial update of the content of a block blob, use the Put Block + List operation. + + :param content_length: The length of the request. + :type content_length: long + :param body: Initial data. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + blob_type = "BlockBlob" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.upload.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = 
self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
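+        # The ETag captured here supports optimistic concurrency on later writes:
+        # replaying it via ModifiedAccessConditions(if_match=etag) makes the
+        # service reject the request with 412 (Precondition Failed) if the blob
+        # changed in between. Hedged sketch against this operations class:
+        #
+        #     cond = _models.ModifiedAccessConditions(if_match=etag)
+        #     await block_blob.upload(len(data), io.BytesIO(data),
+        #                             modified_access_conditions=cond)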
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def put_blob_from_url( + self, + content_length: int, + copy_source: str, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytearray] = None, + metadata: Optional[str] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + source_content_md5: Optional[bytearray] = None, + blob_tags_string: Optional[str] = None, + copy_source_blob_properties: Optional[bool] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are + read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial + updates are not supported with Put Blob from URL; the content of an existing blob is + overwritten with the content of the new blob. To perform partial updates to a block blob’s + contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. + + :param content_length: The length of the request. + :type content_length: long + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. 
+ If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param copy_source_blob_properties: Optional, default is true. Indicates if properties from + the source blob should be copied. + :type copy_source_blob_properties: bool + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. 
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + blob_type = "BlockBlob" + accept = "application/xml" + + # Construct URL + url = self.put_blob_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + 
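+        # Only options the caller actually supplied become request headers; the
+        # service applies its own defaults for anything omitted. A hedged sketch
+        # of the same operation through the public wrapper (upload_blob_from_url
+        # is assumed from the package's BlobClient API, not defined here):
+        #
+        #     await blob_client.upload_blob_from_url(source_url, overwrite=True)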
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _source_if_tags is not None: + header_parameters['x-ms-source-if-tags'] = 
self._serialize.header("source_if_tags", _source_if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if copy_source_blob_properties is not None: + header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def stage_block( + self, + block_id: str, + content_length: int, + body: IO, + transactional_content_md5: Optional[bytearray] = None, + transactional_content_crc64: Optional[bytearray] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + **kwargs + ) -> None: + """The Stage Block operation creates a new block to be committed as part of a blob. + + :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the + string must be less than or equal to 64 bytes in size. 
For a given blob, the length of the + value specified for the blockid parameter must be the same size for each block. + :type block_id: str + :param content_length: The length of the request. + :type content_length: long + :param body: Initial data. + :type body: IO + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "block" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.stage_block.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = 
self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def stage_block_from_url( + self, + block_id: str, + content_length: int, + source_url: str, + source_range: Optional[str] = None, + source_content_md5: Optional[bytearray] = None, + source_contentcrc64: Optional[bytearray] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Stage Block operation creates a 
new block to be committed as part of a blob where the + contents are read from a URL. + + :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the + string must be less than or equal to 64 bytes in size. For a given blob, the length of the + value specified for the blockid parameter must be the same size for each block. + :type block_id: str + :param content_length: The length of the request. + :type content_length: long + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "block" + accept = "application/xml" + + # Construct URL + url = self.stage_block_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = 
self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + 
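+        # Content-MD5 and x-ms-content-crc64 echo the checksum the service
+        # computed over the range it read from the copy source; comparing either
+        # against a locally computed value is a cheap end-to-end integrity check.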
response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def commit_block_list( + self, + blocks: "_models.BlockLookupList", + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytearray] = None, + transactional_content_crc64: Optional[bytearray] = None, + metadata: Optional[str] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Commit Block List operation writes a blob by specifying the list of block IDs that make up + the blob. In order to be written as part of a blob, a block must have been successfully written + to the server in a prior Put Block operation. You can call Put Block List to update a blob by + uploading only those blocks that have changed, then committing the new and existing blocks + together. You can do this by specifying whether to commit a block from the committed block list + or from the uncommitted block list, or to commit the most recently uploaded version of the + block, whichever list it may belong to. + + :param blocks: + :type blocks: ~azure.storage.blob.models.BlockLookupList + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. 
See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_cache_control = None + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "blocklist" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.commit_block_list.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + 
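+        # comp=blocklist routes this PUT to Put Block List; every block id
+        # referenced in the body must already have been staged via Put Block or
+        # Put Block from URL. Hedged end-to-end sketch against this operations
+        # class (ids are base64 strings of equal pre-encoding length):
+        #
+        #     import base64, io
+        #     ids = [base64.b64encode(b"block-%05d" % i).decode() for i in range(2)]
+        #     for bid, chunk in zip(ids, (b"hello ", b"world")):
+        #         await block_blob.stage_block(bid, len(chunk), io.BytesIO(chunk))
+        #     await block_blob.commit_block_list(_models.BlockLookupList(latest=ids))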
query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = 
self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def get_block_list( + self, + snapshot: Optional[str] = None, + list_type: Union[str, "_models.BlockListType"] = "committed", + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> "_models.BlockList": + """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a + block blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param list_type: Specifies whether to return the list of committed blocks, the list of + uncommitted blocks, or both lists together. 
+ :type list_type: str or ~azure.storage.blob.models.BlockListType + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BlockList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlockList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "blocklist" + accept = "application/xml" + + # Construct URL + url = self.get_block_list.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('BlockList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_container_operations.py new file mode 100644 index 0000000..904fea3 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_container_operations.py @@ -0,0 +1,1631 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class ContainerOperations: + """ContainerOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + access: Optional[Union[str, "_models.PublicAccessType"]] = None, + request_id_parameter: Optional[str] = None, + container_cpk_scope_info: Optional["_models.ContainerCpkScopeInfo"] = None, + **kwargs + ) -> None: + """creates a new container under the specified account. If the container with the same name + already exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param metadata: Optional. A name-value pair to associate with the container as metadata.
+         Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for
+         more information.
+        :type metadata: str
+        :param access: Specifies whether data in the container may be accessed publicly and the level
+         of access.
+        :type access: str or ~azure.storage.blob.models.PublicAccessType
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param container_cpk_scope_info: Parameter group.
+        :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None, or the result of cls(response)
+        :rtype: None
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None) # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+
+        _default_encryption_scope = None
+        _prevent_encryption_scope_override = None
+        if container_cpk_scope_info is not None:
+            _default_encryption_scope = container_cpk_scope_info.default_encryption_scope
+            _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override
+        restype = "container"
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.create.metadata['url'] # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {} # type: Dict[str, Any]
+        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {} # type: Dict[str, Any]
+        if metadata is not None:
+            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+        if access is not None:
+            header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id_parameter is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+        if _default_encryption_scope is not None:
+            header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str')
+        if _prevent_encryption_scope_override is not None:
+            header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool')
+
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{containerName}'} # type: ignore + + async def get_properties( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs + ) -> None: + """returns all user-defined metadata and system properties for the specified container. The data + returned does not include the container's list of blobs. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. 
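+         As an editorial illustration only (the ``container_client`` and ``lease_id``
+         names and the ``lease`` keyword of the public wrapper are assumed)::
+
+             # sketch: lease may be a lease ID string or a lease client
+             props = container_client.get_container_properties(lease=lease_id)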
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "container" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) + response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) + 
response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) + response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) + response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{containerName}'} # type: ignore + + async def delete( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """operation marks the specified container for deletion. The container and any blobs contained + within it are later deleted during garbage collection. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + restype = "container" + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = 
self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{containerName}'} # type: ignore + + async def set_metadata( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """operation sets one or more user-defined name-value pairs for the specified container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + restype = "container" + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{containerName}'} # type: ignore + + async def get_access_policy( + self, + timeout: Optional[int] = None, + 
request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs + ) -> List["_models.SignedIdentifier"]: + """gets the permissions for the specified container. The permissions indicate whether container + data may be accessed publicly. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of SignedIdentifier, or the result of cls(response) + :rtype: list[~azure.storage.blob.models.SignedIdentifier] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "container" + comp = "acl" + accept = "application/xml" + + # Construct URL + url = self.get_access_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore + + async def set_access_policy( + self, + timeout: Optional[int] = None, + access: Optional[Union[str, "_models.PublicAccessType"]] = None, + request_id_parameter: Optional[str] = None, + container_acl: Optional[List["_models.SignedIdentifier"]] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """sets the permissions for the specified container. The permissions indicate whether blobs in a + container may be accessed publicly. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param access: Specifies whether data in the container may be accessed publicly and the level + of access. + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param container_acl: the acls for the container. + :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
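+         As an editorial illustration only (``AccessPolicy`` is assumed from this
+         package's exports; ``container_client`` and ``expiry_time`` are illustrative)::
+
+             # sketch: publish a stored access policy granting read permission
+             policy = AccessPolicy(permission="r", expiry=expiry_time)
+             container_client.set_container_access_policy({"readonly": policy})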
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + restype = "container" + comp = "acl" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_access_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if access is not None: + header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}} + if container_acl is not None: + body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise 
HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore + + async def restore( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + deleted_container_name: Optional[str] = None, + deleted_container_version: Optional[str] = None, + **kwargs + ) -> None: + """Restores a previously-deleted container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of + the deleted container to restore. + :type deleted_container_name: str + :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the + version of the deleted container to restore. 
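+         As an editorial illustration only (the public ``undelete_container`` wrapper
+         on the service client and the ``deleted`` properties object are assumed)::
+
+             # sketch: name/version typically come from list_containers(include_deleted=True)
+             service_client.undelete_container(deleted.name, deleted.version)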
+ :type deleted_container_version: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "undelete" + accept = "application/xml" + + # Construct URL + url = self.restore.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if deleted_container_name is not None: + header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') + if deleted_container_version is not None: + header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + restore.metadata = {'url': '/{containerName}'} # type: ignore + + async def rename( + self, + source_container_name: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + source_lease_id: Optional[str] = None, + **kwargs + ) -> None: + """Renames an existing container. + + :param source_container_name: Required. Specifies the name of the container to rename. + :type source_container_name: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param source_lease_id: A lease ID for the source path. If specified, the source path must have + an active lease and the lease ID must match. + :type source_lease_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "rename" + accept = "application/xml" + + # Construct URL + url = self.rename.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-source-container-name'] = self._serialize.header("source_container_name", source_container_name, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {'url': '/{containerName}'} # type: ignore + + async def submit_batch( + self, + content_length: int, + multipart_content_type: str, + body: IO, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> IO: + """The Batch operation allows multiple API calls to be embedded into a single HTTP request. 
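+
+        As an editorial illustration only: at the public layer, container-scoped
+        batch helpers (``delete_blobs`` is assumed here, with an illustrative
+        ``container_client``) build the multipart body this operation submits::
+
+            # sketch: one multipart request deleting several blobs
+            responses = container_client.delete_blobs("a.txt", "b.txt")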
+
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param multipart_content_type: Required. The value of this header must be multipart/mixed with
+         a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``.
+        :type multipart_content_type: str
+        :param body: Initial data.
+        :type body: IO
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: IO, or the result of cls(response)
+        :rtype: IO
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None) # type: ClsType[IO]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        restype = "container"
+        comp = "batch"
+        # Pop any generic content_type override so it cannot reach the request; the
+        # batch body must go out with the caller's multipart/mixed Content-Type.
+        content_type = kwargs.pop("content_type", "application/xml")
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.submit_batch.metadata['url'] # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {} # type: Dict[str, Any]
+        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {} # type: Dict[str, Any]
+        header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+        # Content-Type carries the caller's multipart/mixed value with its batch boundary.
+        header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id_parameter is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        body_content_kwargs = {} # type: Dict[str, Any]
+        body_content = self._serialize.body(body, 'IO', is_xml=True)
+        body_content_kwargs['content'] = body_content
+        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
+        pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + submit_batch.metadata = {'url': '/{containerName}'} # type: ignore + + async def acquire_lease( + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "acquire" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + 
header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore + + async def release_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
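+         As an editorial illustration only (the acquire/release flow through the
+         public wrappers and ``BlobLeaseClient`` is assumed; ``container_client`` is
+         illustrative)::
+
+             # sketch: hold a 15-second lease while working on the container
+             lease = container_client.acquire_lease(lease_duration=15)
+             try:
+                 ...  # work on the container while it is locked
+             finally:
+                 lease.release()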
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "release" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = 
{'url': '/{containerName}'} # type: ignore + + async def renew_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "renew" + accept = "application/xml" + + # Construct URL + url = self.renew_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + 
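For orientation, the acquire/renew/release actions implemented here are normally driven through the lease wrapper this package re-exports, not by calling the generated operations class directly. A minimal sketch of the round trip using the synchronous wrapper, with placeholder account details:

    from azure.multiapi.storagev2.blob.v2020_06_12 import ContainerClient

    container = ContainerClient(
        account_url="https://<account>.blob.core.windows.net",  # placeholder
        container_name="my-container",
        credential="<sas-token-or-account-key>",                # placeholder
    )
    # acquire_lease issues the x-ms-lease-action: acquire request built above;
    # the duration must be 15-60 seconds, or -1 for an infinite lease.
    lease = container.acquire_lease(lease_duration=15)
    lease.renew()    # restarts the duration clock ("renew" action)
    lease.release()  # frees the container for other writers ("release" action)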
if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {'url': '/{containerName}'} # type: ignore + + async def break_lease( + self, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a fixed- + duration lease breaks after the remaining lease period elapses, and an infinite lease breaks + immediately. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "break" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + 
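The break action above, and the change action defined next, are exposed on the same lease wrapper. A sketch reusing the `lease` handle from the earlier lease example; the two calls are independent alternatives, not a sequence:

    import uuid

    # Break with a 10-second grace period (sent as x-ms-lease-break-period);
    # if less time remains on the lease, the shorter remainder wins.
    lease.break_lease(lease_break_period=10)

    # Or swap the lease ID in place without ever releasing the lock.
    lease.change(proposed_lease_id=str(uuid.uuid4()))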
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {'url': '/{containerName}'} # type: ignore + + async def change_lease( + self, + lease_id: str, + proposed_lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "change" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {'url': '/{containerName}'} # type: ignore + + async def list_blob_flat_segment( + self, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> "_models.ListBlobsFlatSegmentResponse": + """[Update] The List Blobs operation returns a list of the blobs under the specified container. + + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. 
+ :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsFlatSegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_blob_flat_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + 
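The NextMarker contract described above is what the public pager follows. A sketch of resumable flat listing, assuming the placeholder `container` client from the earlier lease example:

    # ContainerClient.list_blobs pages through flat-segment responses.
    pager = container.list_blobs(name_starts_with="logs/").by_page()
    for page in pager:
        for blob in page:
            print(blob.name)
    # pager.continuation_token holds the opaque marker; pass it back via
    # container.list_blobs().by_page(continuation_token=...) to resume later.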
deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore + + async def list_blob_hierarchy_segment( + self, + delimiter: str, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> "_models.ListBlobsHierarchySegmentResponse": + """[Update] The List Blobs operation returns a list of the blobs under the specified container. + + :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix element in the response body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the delimiter character. The delimiter may be a single character or a string. + :type delimiter: str + :param prefix: Filters the results to return only blobs whose names begin with the specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The operation returns the NextMarker value within the response body if the listing operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the response. + :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] + :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore + + async def get_account_info( + self, + **kwargs + ) 
-> None: + """Returns the sku name and account kind. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "account" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_account_info.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) + response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_directory_operations.py new file mode 100644 index 0000000..338ff69 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_directory_operations.py @@ -0,0 +1,739 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
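Before the generated directory-operations module proper, a sketch of how the hierarchy listing and account-info operations above surface on the wrapper client, again assuming the placeholder `container` from the earlier lease example:

    # walk_blobs drives the hierarchy listing; with delimiter="/", each
    # BlobPrefix in the response appears as a virtual "folder" level.
    for item in container.walk_blobs(name_starts_with="logs/", delimiter="/"):
        print(item.name)

    # get_account_information performs the restype=account, comp=properties GET.
    info = container.get_account_information()
    print(info["sku_name"], info["account_kind"])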
+# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class DirectoryOperations: + """DirectoryOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + timeout: Optional[int] = None, + directory_properties: Optional[str] = None, + posix_permissions: Optional[str] = None, + posix_umask: Optional[str] = None, + request_id_parameter: Optional[str] = None, + directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Create a directory. By default, the destination is overwritten and, if the destination already exists and has a lease, the lease is broken. This operation supports conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob Service Operations `_. To fail if the destination already exists, use a conditional request with If-None-Match: "*". + + :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param directory_properties: Optional. User-defined properties to be stored with the file or directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is base64 encoded. + :type directory_properties: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask restricts permission settings for files and directories, and is applied only when a default ACL does not exist in the parent directory. If a umask bit is set, the corresponding permission is disabled; otherwise it is taken from the requested permissions. A 4-digit octal notation (e.g. 0022) is supported here.
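A brief aside on the umask rule just described: each bit set in the umask disables the matching permission bit, so the effective mode is the requested mode masked by the complement of the umask (assumed semantics, shown in octal):

    requested = 0o777         # rwxrwxrwx requested via x-ms-permissions
    umask = 0o027             # the service's documented default
    effective = requested & ~umask
    print(oct(effective))     # 0o750 -> rwxr-x---: group loses w, others lose rwx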
If no umask was specified, + a default umask - 0027 will be used. + :type posix_umask: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param directory_http_headers: Parameter group. + :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cache_control = None + _content_type = None + _content_encoding = None + _content_language = None + _content_disposition = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + if directory_http_headers is not None: + _cache_control = directory_http_headers.cache_control + _content_type = directory_http_headers.content_type + _content_encoding = directory_http_headers.content_encoding + _content_language = directory_http_headers.content_language + _content_disposition = directory_http_headers.content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + resource = "directory" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("resource", resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + 
header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def rename( + self, + rename_source: str, + timeout: Optional[int] = None, + marker: Optional[str] = None, + path_rename_mode: Optional[Union[str, "_models.PathRenameMode"]] = None, + directory_properties: Optional[str] = None, + posix_permissions: Optional[str] = None, + posix_umask: Optional[str] = None, + source_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + **kwargs + ) -> 
None: + """Rename a directory. By default, the destination is overwritten and, if the destination already exists and has a lease, the lease is broken. This operation supports conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob Service Operations `_. To fail if the destination already exists, use a conditional request with If-None-Match: "*". + + :param rename_source: The file or directory to be renamed. The value must have the following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties will overwrite the existing properties; otherwise, the existing properties will be preserved. + :type rename_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param marker: When renaming a directory, the number of paths that are renamed with each invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the rename operation to continue renaming the directory. + :type marker: str + :param path_rename_mode: Determines the behavior of the rename operation. + :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode + :param directory_properties: Optional. User-defined properties to be stored with the file or directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value is base64 encoded. + :type directory_properties: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask restricts permission settings for files and directories, and is applied only when a default ACL does not exist in the parent directory. If a umask bit is set, the corresponding permission is disabled; otherwise it is taken from the requested permissions. A 4-digit octal notation (e.g. 0022) is supported here. If no umask is specified, a default umask of 0027 will be used. + :type posix_umask: str + :param source_lease_id: A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + :type source_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param directory_http_headers: Parameter group. + :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group.
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cache_control = None + _content_type = None + _content_encoding = None + _content_language = None + _content_disposition = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if directory_http_headers is not None: + _cache_control = directory_http_headers.cache_control + _content_type = directory_http_headers.content_type + _content_encoding = directory_http_headers.content_encoding + _content_language = directory_http_headers.content_language + _content_disposition = directory_http_headers.content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + accept = "application/xml" + + # Construct URL + url = self.rename.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] 
= self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + 
response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def delete( + self, + recursive_directory_delete: bool, + timeout: Optional[int] = None, + marker: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Deletes the directory. + + :param recursive_directory_delete: If "true", all paths beneath the directory will be deleted. If "false" and the directory is non-empty, an error occurs. + :type recursive_directory_delete: bool + :param timeout: The timeout parameter is expressed in seconds. For more information, see :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param marker: When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent invocation of the delete operation to continue deleting the directory. + :type marker: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def set_access_control( + self, + timeout: Optional[int] = None, + owner: Optional[str] = None, + group: Optional[str] = None, + posix_permissions: Optional[str] = None, + posix_acl: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Set the owner, group, permissions, or access control list for a directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and directories. The value is a + comma-separated list of access control entries. Each access control entry (ACE) consists of a + scope, a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "setAccessControl" + accept = "application/xml" + + # Construct URL + url = self.set_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def get_access_control( + self, + timeout: Optional[int] = None, + upn: Optional[bool] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Get the owner, group, permissions, or access control list for a directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If + "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response + headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If + "false", the values will be returned as Azure Active Directory Object IDs. The default value is + false. + :type upn: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "getAccessControl" + accept = "application/xml" + + # Construct URL + url = self.get_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) + response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) + response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) + response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) + response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_page_blob_operations.py new file mode 100644 index 0000000..da920b5 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_page_blob_operations.py @@ -0,0 +1,1393 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class PageBlobOperations: + """PageBlobOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
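    A typical way to exercise these operations is through the public client
    rather than this generated class. A minimal sketch (assuming the ``aio``
    subpackage mirrors the azure-storage-blob 12.x surface, and that
    ``conn_str`` points at an account with an existing container named
    ``mycontainer``)::

        import asyncio

        from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobServiceClient

        async def main(conn_str: str) -> None:
            async with BlobServiceClient.from_connection_string(conn_str) as service:
                blob = service.get_blob_client("mycontainer", "mypageblob")
                # create_page_blob and upload_page drive PageBlobOperations.create
                # and PageBlobOperations.upload_pages under the hood; page blob
                # sizes and write ranges must be 512-byte aligned.
                await blob.create_page_blob(size=1024)
                await blob.upload_page(b"\x00" * 512, offset=0, length=512)

        asyncio.run(main("<connection-string>"))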
+ """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + content_length: int, + blob_content_length: int, + timeout: Optional[int] = None, + tier: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] = None, + metadata: Optional[str] = None, + blob_sequence_number: Optional[int] = 0, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Create operation creates a new page blob. + + :param content_length: The length of the request. + :type content_length: long + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. + :type blob_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param tier: Optional. Indicates the tier to be set on the page blob. + :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled + value that you can use to track requests. The value of the sequence number must be between 0 + and 2^63 - 1. + :type blob_sequence_number: long + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + blob_type = "PageBlob" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = 
self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def upload_pages( + self, + content_length: int, + body: IO, + transactional_content_md5: Optional[bytearray] = None, + transactional_content_crc64: Optional[bytearray] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Upload Pages operation writes a range of pages to a page blob. + + :param content_length: The length of the request. + :type content_length: long + :param body: Initial data. + :type body: IO + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: The range of bytes to write as pages. The start of the range must be a multiple + of 512 and the end of the range must be one less than a multiple of 512 (for example, + bytes=0-511). + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Parameter group. + :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + comp = "page" + page_write = "update" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.upload_pages.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + 
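        # Customer-provided key (CPK) headers: the service expects the key, its
        # SHA-256 hash, and the algorithm to be supplied together.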
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def clear_pages( + self, + content_length: int, + timeout: Optional[int] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Clear Pages operation clears a set of pages from a page blob. + + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: The range of bytes to clear. The start of the range must be a multiple of 512 and + the end of the range must be one less than a multiple of 512 (for example, bytes=0-511). + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Parameter group. + :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + comp = "page" + page_write = "clear" + accept = "application/xml" + + # Construct URL + url = self.clear_pages.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if 
_encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def upload_pages_from_url( + self, + source_url: str, + source_range: str, + content_length: int, + range: str, + source_content_md5: Optional[bytearray] = None, + 
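        # Note the generated spelling below: source_contentcrc64 (no underscore
        # before "crc64"); it maps to the x-ms-source-content-crc64 header.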
source_contentcrc64: Optional[bytearray] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """The Upload Pages operation writes a range of pages to a page blob where the contents are read + from a URL. + + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param source_range: Bytes of source data in the specified range. The length of this range + should match the ContentLength header and x-ms-range/Range destination range header. + :type source_range: str + :param content_length: The length of the request. + :type content_length: long + :param range: The range of bytes to which the source range would be written. The range should + be 512 aligned and range-end is required. + :type range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param sequence_number_access_conditions: Parameter group. + :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. 
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "page" + page_write = "update" + accept = "application/xml" + + # Construct URL + url = self.upload_pages_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = 
self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + 
request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def get_page_ranges( + self, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> "_models.PageList": + """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot + of a page blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "pagelist" + accept = "application/xml" + + # Construct URL + url = self.get_page_ranges.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) 
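+        # map_error above raises typed azure.core exceptions for 401/404/409; any other
+        # failing status is surfaced through the HttpResponseError below, carrying the
+        # deserialized StorageError model.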
+        raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+        response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length'))
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+        deserialized = self._deserialize('PageList', pipeline_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)
+
+        return deserialized
+    get_page_ranges.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    async def get_page_ranges_diff(
+        self,
+        snapshot: Optional[str] = None,
+        timeout: Optional[int] = None,
+        prevsnapshot: Optional[str] = None,
+        prev_snapshot_url: Optional[str] = None,
+        range: Optional[str] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+        modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+        **kwargs
+    ) -> "_models.PageList":
+        """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob
+        that were changed between the target blob and a previous snapshot.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`Creating a Snapshot of a Blob.`.
+        :type snapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a
+         DateTime value that specifies that the response will contain only pages that were changed
+         between the target blob and the previous snapshot. Changed pages include both updated and
+         cleared pages. The target blob may be a snapshot, as long as the snapshot specified by
+         prevsnapshot is the older of the two. Note that incremental snapshots are currently supported
+         only for blobs created on or after January 1, 2016.
+        :type prevsnapshot: str
+        :param prev_snapshot_url: Optional. This header is only supported in service versions
+         2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The
+         response will only contain pages that were changed between the target blob and its previous
+         snapshot.
+        :type prev_snapshot_url: str
+        :param range: Return only the bytes of the blob in the specified range.
+        :type range: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "pagelist" + accept = "application/xml" + + # Construct URL + url = self.get_page_ranges_diff.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if prevsnapshot is not None: + query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if prev_snapshot_url is not None: + header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('PageList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def resize( + self, + blob_content_length: int, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Resize the Blob. + + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. + :type blob_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.resize.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') 
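+        # The new maximum blob size travels in the x-ms-blob-content-length header set
+        # below; as the docstring notes, it must be 512-byte aligned, and the service
+        # (not this client) rejects misaligned values.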
+        header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id_parameter is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+        response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number'))
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    resize.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    async def update_sequence_number(
+        self,
+        sequence_number_action: Union[str, "_models.SequenceNumberActionType"],
+        timeout: Optional[int] = None,
+        blob_sequence_number: Optional[int] = 0,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+        modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+        **kwargs
+    ) -> None:
+        """Update the sequence number of the blob.
+
+        :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the
+         request. This property applies to page blobs only, and indicates how the service should
+         modify the blob's sequence number.
+        :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled
+         value that you can use to track requests. The value of the sequence number must be between 0
+         and 2^63 - 1.
+        :type blob_sequence_number: long
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.update_sequence_number.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize(_models.StorageError, response)
+        raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+        response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number'))
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    update_sequence_number.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    async def copy_incremental(
+        self,
+        copy_source: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+        **kwargs
+    ) -> None:
+        """The Copy Incremental operation copies a snapshot of the source page blob to a destination
+        page blob. The snapshot is copied such that only the differential changes since the previously
+        copied snapshot are transferred to the destination. The copied snapshots are complete copies
+        of the original snapshot and can be read or copied from as usual. This API is supported since
+        REST version 2016-05-31.
+
+        :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL
+         of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded
+         as it would appear in a request URI. The source blob must either be public or must be
+         authenticated via a shared access signature.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "incrementalcopy" + accept = "application/xml" + + # Construct URL + url = self.copy_incremental.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_service_operations.py new file mode 100644 index 0000000..91a0646 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/aio/operations/_service_operations.py @@ -0,0 +1,691 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class ServiceOperations: + """ServiceOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def set_properties( + self, + storage_service_properties: "_models.StorageServiceProperties", + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> None: + """Sets properties for a storage account's Blob service endpoint, including properties for Storage + Analytics and CORS (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. + :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. 
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None, or the result of cls(response)
+        :rtype: None
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        restype = "service"
+        comp = "properties"
+        content_type = kwargs.pop("content_type", "application/xml")
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.set_properties.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id_parameter is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        body_content_kwargs = {}  # type: Dict[str, Any]
+        body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True)
+        body_content_kwargs['content'] = body_content
+        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    set_properties.metadata = {'url': '/'}  # type: ignore
+
+    async def get_properties(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs
+    ) -> "_models.StorageServiceProperties":
+        """Gets the properties of a storage account's Blob service, including properties for Storage
+        Analytics and CORS (Cross-Origin Resource Sharing) rules.
+
+        :param timeout: The timeout parameter is expressed in seconds.
For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceProperties, or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceProperties + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('StorageServiceProperties', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_properties.metadata = {'url': '/'} # type: ignore + + async def get_statistics( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> "_models.StorageServiceStats": + """Retrieves statistics related to replication for the Blob service. It is only available on the + secondary location endpoint when read-access geo-redundant replication is enabled for the + storage account. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceStats, or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceStats + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "stats" + accept = "application/xml" + + # Construct URL + url = self.get_statistics.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('StorageServiceStats', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_statistics.metadata = {'url': '/'} # type: ignore + + async def list_containers_segment( + self, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> "_models.ListContainersSegmentResponse": + """The List Containers Segment operation returns a list of the containers under the specified + account. + + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. 
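+         For example, a prefix of "azure" matches containers named "azure1" or
+         "azure-logs" but not "my-azure".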
+ :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. + :type maxresults: int + :param include: Include this parameter to specify that the container's metadata be returned as + part of the response body. + :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListContainersSegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_containers_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", 
request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_containers_segment.metadata = {'url': '/'} # type: ignore + + async def get_user_delegation_key( + self, + key_info: "_models.KeyInfo", + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> "_models.UserDelegationKey": + """Retrieves a user delegation key for the Blob service. This is only a valid operation when using + bearer token authentication. + + :param key_info: + :type key_info: ~azure.storage.blob.models.KeyInfo + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UserDelegationKey, or the result of cls(response) + :rtype: ~azure.storage.blob.models.UserDelegationKey + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "userdelegationkey" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.get_user_delegation_key.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('UserDelegationKey', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_user_delegation_key.metadata = {'url': '/'} # type: ignore + + async def get_account_info( + self, + **kwargs + ) -> None: + """Returns the sku name and account kind. 
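+        The values are returned in the x-ms-sku-name and x-ms-account-kind response
+        headers rather than in a response body.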
+
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None, or the result of cls(response)
+        :rtype: None
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        restype = "account"
+        comp = "properties"
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.get_account_info.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+        response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name'))
+        response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind'))
+        response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    get_account_info.metadata = {'url': '/'}  # type: ignore
+
+    async def submit_batch(
+        self,
+        content_length: int,
+        multipart_content_type: str,
+        body: IO,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        **kwargs
+    ) -> IO:
+        """The Batch operation allows multiple API calls to be embedded into a single HTTP request.
+
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param multipart_content_type: Required. The value of this header must be multipart/mixed
+         with a batch boundary. Example header value: multipart/mixed;
+         boundary=batch_:code:`<GUID>`.
+        :type multipart_content_type: str
+        :param body: Initial data.
+        :type body: IO
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "batch" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.submit_batch.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(body, 'IO', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + submit_batch.metadata = {'url': '/'} # type: ignore + + async def filter_blobs( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + where: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + **kwargs + ) -> "_models.FilterBlobSegment": + """The Filter Blobs operation enables callers to list blobs across 
all containers whose tags match
+        a given search expression. Filter blobs searches across all containers within a storage
+        account but can be scoped within the expression to a single container.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param where: Filters the results to return only blobs whose tags match the
+         specified expression.
+        :type where: str
+        :param marker: A string value that identifies the portion of the list of results to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all results remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of blobs to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this reason,
+         it is possible that the service will return fewer results than specified by maxresults, or than
+         the default of 5000.
+        :type maxresults: int
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: FilterBlobSegment, or the result of cls(response)
+        :rtype: ~azure.storage.blob.models.FilterBlobSegment
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FilterBlobSegment"]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        comp = "blobs"
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.filter_blobs.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        if where is not None:
+            query_parameters['where'] = self._serialize.query("where", where, 'str')
+        if marker is not None:
+            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+        if maxresults is not None:
+            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id_parameter is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+        header_parameters['Accept'] =
self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('FilterBlobSegment', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/__init__.py new file mode 100644 index 0000000..3d33d25 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/__init__.py @@ -0,0 +1,225 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AccessPolicy + from ._models_py3 import AppendPositionAccessConditions + from ._models_py3 import ArrowConfiguration + from ._models_py3 import ArrowField + from ._models_py3 import BlobFlatListSegment + from ._models_py3 import BlobHTTPHeaders + from ._models_py3 import BlobHierarchyListSegment + from ._models_py3 import BlobItemInternal + from ._models_py3 import BlobMetadata + from ._models_py3 import BlobPrefix + from ._models_py3 import BlobPropertiesInternal + from ._models_py3 import BlobTag + from ._models_py3 import BlobTags + from ._models_py3 import Block + from ._models_py3 import BlockList + from ._models_py3 import BlockLookupList + from ._models_py3 import ClearRange + from ._models_py3 import ContainerCpkScopeInfo + from ._models_py3 import ContainerItem + from ._models_py3 import ContainerProperties + from ._models_py3 import CorsRule + from ._models_py3 import CpkInfo + from ._models_py3 import CpkScopeInfo + from ._models_py3 import DataLakeStorageError + from ._models_py3 import DataLakeStorageErrorAutoGenerated + from ._models_py3 import DelimitedTextConfiguration + from ._models_py3 import DirectoryHttpHeaders + from ._models_py3 import FilterBlobItem + from ._models_py3 import FilterBlobSegment + from ._models_py3 import GeoReplication + from ._models_py3 import JsonTextConfiguration + from ._models_py3 import KeyInfo + from ._models_py3 import LeaseAccessConditions + from ._models_py3 import ListBlobsFlatSegmentResponse + from ._models_py3 import ListBlobsHierarchySegmentResponse + from ._models_py3 import ListContainersSegmentResponse + from ._models_py3 import Logging + from ._models_py3 import Metrics + from ._models_py3 import ModifiedAccessConditions + from ._models_py3 import PageList + from ._models_py3 import PageRange + from ._models_py3 import QueryFormat + from ._models_py3 import QueryRequest + from ._models_py3 import QuerySerialization + from ._models_py3 import RetentionPolicy + from ._models_py3 import SequenceNumberAccessConditions + from ._models_py3 import SignedIdentifier + from ._models_py3 import SourceModifiedAccessConditions + from ._models_py3 import StaticWebsite + from ._models_py3 import StorageError + from ._models_py3 import StorageServiceProperties + from ._models_py3 import StorageServiceStats + from ._models_py3 import UserDelegationKey +except (SyntaxError, ImportError): + from ._models import AccessPolicy # type: ignore + from ._models import AppendPositionAccessConditions # type: ignore + from ._models import ArrowConfiguration # type: ignore + from ._models import ArrowField # type: ignore + from ._models import BlobFlatListSegment # type: ignore + from ._models import BlobHTTPHeaders # type: ignore + from ._models import BlobHierarchyListSegment # type: ignore + from ._models import BlobItemInternal # type: ignore + from ._models import BlobMetadata # type: ignore + from ._models import BlobPrefix # type: ignore + from ._models import BlobPropertiesInternal # type: ignore + from ._models import BlobTag # type: ignore + from ._models import BlobTags # type: ignore + from ._models import Block # type: ignore + from ._models import BlockList # type: ignore + from ._models import BlockLookupList # type: ignore + from ._models import ClearRange # type: ignore + from ._models import ContainerCpkScopeInfo # type: ignore + from ._models import ContainerItem # type: ignore + from ._models import ContainerProperties # type: 
ignore + from ._models import CorsRule # type: ignore + from ._models import CpkInfo # type: ignore + from ._models import CpkScopeInfo # type: ignore + from ._models import DataLakeStorageError # type: ignore + from ._models import DataLakeStorageErrorAutoGenerated # type: ignore + from ._models import DelimitedTextConfiguration # type: ignore + from ._models import DirectoryHttpHeaders # type: ignore + from ._models import FilterBlobItem # type: ignore + from ._models import FilterBlobSegment # type: ignore + from ._models import GeoReplication # type: ignore + from ._models import JsonTextConfiguration # type: ignore + from ._models import KeyInfo # type: ignore + from ._models import LeaseAccessConditions # type: ignore + from ._models import ListBlobsFlatSegmentResponse # type: ignore + from ._models import ListBlobsHierarchySegmentResponse # type: ignore + from ._models import ListContainersSegmentResponse # type: ignore + from ._models import Logging # type: ignore + from ._models import Metrics # type: ignore + from ._models import ModifiedAccessConditions # type: ignore + from ._models import PageList # type: ignore + from ._models import PageRange # type: ignore + from ._models import QueryFormat # type: ignore + from ._models import QueryRequest # type: ignore + from ._models import QuerySerialization # type: ignore + from ._models import RetentionPolicy # type: ignore + from ._models import SequenceNumberAccessConditions # type: ignore + from ._models import SignedIdentifier # type: ignore + from ._models import SourceModifiedAccessConditions # type: ignore + from ._models import StaticWebsite # type: ignore + from ._models import StorageError # type: ignore + from ._models import StorageServiceProperties # type: ignore + from ._models import StorageServiceStats # type: ignore + from ._models import UserDelegationKey # type: ignore + +from ._azure_blob_storage_enums import ( + AccessTier, + AccessTierOptional, + AccessTierRequired, + AccountKind, + ArchiveStatus, + BlobExpiryOptions, + BlobType, + BlockListType, + CopyStatusType, + DeleteSnapshotsOptionType, + EncryptionAlgorithmType, + GeoReplicationStatusType, + LeaseDurationType, + LeaseStateType, + LeaseStatusType, + ListBlobsIncludeItem, + ListContainersIncludeType, + PathRenameMode, + PremiumPageBlobAccessTier, + PublicAccessType, + QueryFormatType, + RehydratePriority, + SequenceNumberActionType, + SkuName, + StorageErrorCode, +) + +__all__ = [ + 'AccessPolicy', + 'AppendPositionAccessConditions', + 'ArrowConfiguration', + 'ArrowField', + 'BlobFlatListSegment', + 'BlobHTTPHeaders', + 'BlobHierarchyListSegment', + 'BlobItemInternal', + 'BlobMetadata', + 'BlobPrefix', + 'BlobPropertiesInternal', + 'BlobTag', + 'BlobTags', + 'Block', + 'BlockList', + 'BlockLookupList', + 'ClearRange', + 'ContainerCpkScopeInfo', + 'ContainerItem', + 'ContainerProperties', + 'CorsRule', + 'CpkInfo', + 'CpkScopeInfo', + 'DataLakeStorageError', + 'DataLakeStorageErrorAutoGenerated', + 'DelimitedTextConfiguration', + 'DirectoryHttpHeaders', + 'FilterBlobItem', + 'FilterBlobSegment', + 'GeoReplication', + 'JsonTextConfiguration', + 'KeyInfo', + 'LeaseAccessConditions', + 'ListBlobsFlatSegmentResponse', + 'ListBlobsHierarchySegmentResponse', + 'ListContainersSegmentResponse', + 'Logging', + 'Metrics', + 'ModifiedAccessConditions', + 'PageList', + 'PageRange', + 'QueryFormat', + 'QueryRequest', + 'QuerySerialization', + 'RetentionPolicy', + 'SequenceNumberAccessConditions', + 'SignedIdentifier', + 'SourceModifiedAccessConditions', + 
'StaticWebsite', + 'StorageError', + 'StorageServiceProperties', + 'StorageServiceStats', + 'UserDelegationKey', + 'AccessTier', + 'AccessTierOptional', + 'AccessTierRequired', + 'AccountKind', + 'ArchiveStatus', + 'BlobExpiryOptions', + 'BlobType', + 'BlockListType', + 'CopyStatusType', + 'DeleteSnapshotsOptionType', + 'EncryptionAlgorithmType', + 'GeoReplicationStatusType', + 'LeaseDurationType', + 'LeaseStateType', + 'LeaseStatusType', + 'ListBlobsIncludeItem', + 'ListContainersIncludeType', + 'PathRenameMode', + 'PremiumPageBlobAccessTier', + 'PublicAccessType', + 'QueryFormatType', + 'RehydratePriority', + 'SequenceNumberActionType', + 'SkuName', + 'StorageErrorCode', +] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_azure_blob_storage_enums.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_azure_blob_storage_enums.py new file mode 100644 index 0000000..5d03a10 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_azure_blob_storage_enums.py @@ -0,0 +1,339 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum, EnumMeta +from six import with_metaclass + +class _CaseInsensitiveEnumMeta(EnumMeta): + def __getitem__(self, name): + return super().__getitem__(name.upper()) + + def __getattr__(cls, name): + """Return the enum member matching `name` + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. 
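        A hedged doctest-style illustration (editorial addition, not generated
        code; ``AccessTier`` is one of the enums defined later in this module):

        >>> AccessTier['hot'] is AccessTier.HOT  # __getitem__ upper-cases the key
        True
        >>> AccessTier.archive is AccessTier.ARCHIVE  # __getattr__ retries with name.upper()
        True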
+ """ + try: + return cls._member_map_[name.upper()] + except KeyError: + raise AttributeError(name) + + +class AccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + HOT = "Hot" + COOL = "Cool" + ARCHIVE = "Archive" + +class AccessTierOptional(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + HOT = "Hot" + COOL = "Cool" + ARCHIVE = "Archive" + +class AccessTierRequired(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + HOT = "Hot" + COOL = "Cool" + ARCHIVE = "Archive" + +class AccountKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + STORAGE = "Storage" + BLOB_STORAGE = "BlobStorage" + STORAGE_V2 = "StorageV2" + FILE_STORAGE = "FileStorage" + BLOCK_BLOB_STORAGE = "BlockBlobStorage" + +class ArchiveStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + REHYDRATE_PENDING_TO_HOT = "rehydrate-pending-to-hot" + REHYDRATE_PENDING_TO_COOL = "rehydrate-pending-to-cool" + +class BlobExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + NEVER_EXPIRE = "NeverExpire" + RELATIVE_TO_CREATION = "RelativeToCreation" + RELATIVE_TO_NOW = "RelativeToNow" + ABSOLUTE = "Absolute" + +class BlobType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + BLOCK_BLOB = "BlockBlob" + PAGE_BLOB = "PageBlob" + APPEND_BLOB = "AppendBlob" + +class BlockListType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + COMMITTED = "committed" + UNCOMMITTED = "uncommitted" + ALL = "all" + +class CopyStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + PENDING = "pending" + SUCCESS = "success" + ABORTED = "aborted" + FAILED = "failed" + +class DeleteSnapshotsOptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + INCLUDE = "include" + ONLY = "only" + +class EncryptionAlgorithmType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + NONE = "None" + AES256 = "AES256" + +class GeoReplicationStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The status of the secondary location + """ + + LIVE = "live" + BOOTSTRAP = "bootstrap" + UNAVAILABLE = "unavailable" + +class LeaseDurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + INFINITE = "infinite" + FIXED = "fixed" + +class LeaseStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + AVAILABLE = "available" + LEASED = "leased" + EXPIRED = "expired" + BREAKING = "breaking" + BROKEN = "broken" + +class LeaseStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + LOCKED = "locked" + UNLOCKED = "unlocked" + +class ListBlobsIncludeItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + COPY = "copy" + DELETED = "deleted" + METADATA = "metadata" + SNAPSHOTS = "snapshots" + UNCOMMITTEDBLOBS = "uncommittedblobs" + VERSIONS = "versions" + TAGS = "tags" + +class ListContainersIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + METADATA = "metadata" + DELETED = "deleted" + +class PathRenameMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + LEGACY = "legacy" + POSIX = "posix" + +class 
PremiumPageBlobAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + +class PublicAccessType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + CONTAINER = "container" + BLOB = "blob" + +class QueryFormatType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The quick query format type. + """ + + DELIMITED = "delimited" + JSON = "json" + ARROW = "arrow" + +class RehydratePriority(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """If an object is in rehydrate pending state then this header is returned with priority of + rehydrate. Valid values are High and Standard. + """ + + HIGH = "High" + STANDARD = "Standard" + +class SequenceNumberActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + MAX = "max" + UPDATE = "update" + INCREMENT = "increment" + +class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + STANDARD_LRS = "Standard_LRS" + STANDARD_GRS = "Standard_GRS" + STANDARD_RAGRS = "Standard_RAGRS" + STANDARD_ZRS = "Standard_ZRS" + PREMIUM_LRS = "Premium_LRS" + +class StorageErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Error codes returned by the service + """ + + ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" + ACCOUNT_BEING_CREATED = "AccountBeingCreated" + ACCOUNT_IS_DISABLED = "AccountIsDisabled" + AUTHENTICATION_FAILED = "AuthenticationFailed" + AUTHORIZATION_FAILURE = "AuthorizationFailure" + CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" + CONDITION_NOT_MET = "ConditionNotMet" + EMPTY_METADATA_KEY = "EmptyMetadataKey" + INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" + INTERNAL_ERROR = "InternalError" + INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" + INVALID_HEADER_VALUE = "InvalidHeaderValue" + INVALID_HTTP_VERB = "InvalidHttpVerb" + INVALID_INPUT = "InvalidInput" + INVALID_MD5 = "InvalidMd5" + INVALID_METADATA = "InvalidMetadata" + INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" + INVALID_RANGE = "InvalidRange" + INVALID_RESOURCE_NAME = "InvalidResourceName" + INVALID_URI = "InvalidUri" + INVALID_XML_DOCUMENT = "InvalidXmlDocument" + INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" + MD5_MISMATCH = "Md5Mismatch" + METADATA_TOO_LARGE = "MetadataTooLarge" + MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" + MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" + MISSING_REQUIRED_HEADER = "MissingRequiredHeader" + MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" + MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" + OPERATION_TIMED_OUT = "OperationTimedOut" + OUT_OF_RANGE_INPUT = "OutOfRangeInput" + OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" + REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" + RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" + REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" + RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" + RESOURCE_NOT_FOUND = "ResourceNotFound" + SERVER_BUSY = "ServerBusy" + UNSUPPORTED_HEADER = "UnsupportedHeader" + UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" + UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" + UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" + APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet" + BLOB_ALREADY_EXISTS = "BlobAlreadyExists" + BLOB_IMMUTABLE_DUE_TO_POLICY = "BlobImmutableDueToPolicy" + BLOB_NOT_FOUND = 
"BlobNotFound" + BLOB_OVERWRITTEN = "BlobOverwritten" + BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength" + BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit" + BLOCK_LIST_TOO_LONG = "BlockListTooLong" + CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier" + CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource" + CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists" + CONTAINER_BEING_DELETED = "ContainerBeingDeleted" + CONTAINER_DISABLED = "ContainerDisabled" + CONTAINER_NOT_FOUND = "ContainerNotFound" + CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit" + COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported" + COPY_ID_MISMATCH = "CopyIdMismatch" + FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" + INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch" + INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot" + INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired" + INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock" + INVALID_BLOB_TIER = "InvalidBlobTier" + INVALID_BLOB_TYPE = "InvalidBlobType" + INVALID_BLOCK_ID = "InvalidBlockId" + INVALID_BLOCK_LIST = "InvalidBlockList" + INVALID_OPERATION = "InvalidOperation" + INVALID_PAGE_RANGE = "InvalidPageRange" + INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType" + INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl" + INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation" + LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent" + LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken" + LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation" + LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation" + LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation" + LEASE_ID_MISSING = "LeaseIdMissing" + LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired" + LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged" + LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed" + LEASE_LOST = "LeaseLost" + LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation" + LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation" + LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation" + MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet" + NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation" + NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation" + OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob" + PENDING_COPY_OPERATION = "PendingCopyOperation" + PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer" + PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound" + PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported" + SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet" + SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge" + SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded" + SNAPHOT_OPERATION_RATE_EXCEEDED = "SnaphotOperationRateExceeded" + SNAPSHOTS_PRESENT = "SnapshotsPresent" + SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet" + SYSTEM_IN_USE = "SystemInUse" + TARGET_CONDITION_NOT_MET = "TargetConditionNotMet" + UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite" + BLOB_BEING_REHYDRATED = 
"BlobBeingRehydrated" + BLOB_ARCHIVED = "BlobArchived" + BLOB_NOT_ARCHIVED = "BlobNotArchived" + AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" + AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch" + AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" + AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" + AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models.py new file mode 100644 index 0000000..fadcdd5 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models.py @@ -0,0 +1,2031 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + + +class AccessPolicy(msrest.serialization.Model): + """An Access policy. + + :param start: the date-time the policy is active. + :type start: str + :param expiry: the date-time the policy expires. + :type expiry: str + :param permission: the permissions for the acl policy. + :type permission: str + """ + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str'}, + 'expiry': {'key': 'Expiry', 'type': 'str'}, + 'permission': {'key': 'Permission', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(AccessPolicy, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.expiry = kwargs.get('expiry', None) + self.permission = kwargs.get('permission', None) + + +class AppendPositionAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param max_size: Optional conditional header. The max length in bytes permitted for the append + blob. If the Append Block operation would cause the blob to exceed that limit or if the blob + size is already greater than the value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :type max_size: long + :param append_position: Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will succeed only if the append + position is equal to this number. If it is not, the request will fail with the + AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + :type append_position: long + """ + + _attribute_map = { + 'max_size': {'key': 'maxSize', 'type': 'long'}, + 'append_position': {'key': 'appendPosition', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(AppendPositionAccessConditions, self).__init__(**kwargs) + self.max_size = kwargs.get('max_size', None) + self.append_position = kwargs.get('append_position', None) + + +class ArrowConfiguration(msrest.serialization.Model): + """arrow configuration. + + All required parameters must be populated in order to send to Azure. + + :param schema: Required. 
+ :type schema: list[~azure.storage.blob.models.ArrowField] + """ + + _validation = { + 'schema': {'required': True}, + } + + _attribute_map = { + 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, + } + _xml_map = { + 'name': 'ArrowConfiguration' + } + + def __init__( + self, + **kwargs + ): + super(ArrowConfiguration, self).__init__(**kwargs) + self.schema = kwargs['schema'] + + +class ArrowField(msrest.serialization.Model): + """field of an arrow schema. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. + :type type: str + :param name: + :type name: str + :param precision: + :type precision: int + :param scale: + :type scale: int + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'precision': {'key': 'Precision', 'type': 'int'}, + 'scale': {'key': 'Scale', 'type': 'int'}, + } + _xml_map = { + 'name': 'Field' + } + + def __init__( + self, + **kwargs + ): + super(ArrowField, self).__init__(**kwargs) + self.type = kwargs['type'] + self.name = kwargs.get('name', None) + self.precision = kwargs.get('precision', None) + self.scale = kwargs.get('scale', None) + + +class BlobFlatListSegment(msrest.serialization.Model): + """BlobFlatListSegment. + + All required parameters must be populated in order to send to Azure. + + :param blob_items: Required. + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__( + self, + **kwargs + ): + super(BlobFlatListSegment, self).__init__(**kwargs) + self.blob_items = kwargs['blob_items'] + + +class BlobHierarchyListSegment(msrest.serialization.Model): + """BlobHierarchyListSegment. + + All required parameters must be populated in order to send to Azure. + + :param blob_prefixes: + :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] + :param blob_items: Required. + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__( + self, + **kwargs + ): + super(BlobHierarchyListSegment, self).__init__(**kwargs) + self.blob_prefixes = kwargs.get('blob_prefixes', None) + self.blob_items = kwargs['blob_items'] + + +class BlobHTTPHeaders(msrest.serialization.Model): + """Parameter group. + + :param blob_cache_control: Optional. Sets the blob's cache control. If specified, this property + is stored with the blob and returned with a read request. + :type blob_cache_control: str + :param blob_content_type: Optional. Sets the blob's content type. If specified, this property + is stored with the blob and returned with a read request. + :type blob_content_type: str + :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not + validated, as the hashes for the individual blocks were validated when each was uploaded. + :type blob_content_md5: bytearray + :param blob_content_encoding: Optional. 
Sets the blob's content encoding. If specified, this + property is stored with the blob and returned with a read request. + :type blob_content_encoding: str + :param blob_content_language: Optional. Set the blob's content language. If specified, this + property is stored with the blob and returned with a read request. + :type blob_content_language: str + :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header. + :type blob_content_disposition: str + """ + + _attribute_map = { + 'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'}, + 'blob_content_type': {'key': 'blobContentType', 'type': 'str'}, + 'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'}, + 'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'}, + 'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'}, + 'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(BlobHTTPHeaders, self).__init__(**kwargs) + self.blob_cache_control = kwargs.get('blob_cache_control', None) + self.blob_content_type = kwargs.get('blob_content_type', None) + self.blob_content_md5 = kwargs.get('blob_content_md5', None) + self.blob_content_encoding = kwargs.get('blob_content_encoding', None) + self.blob_content_language = kwargs.get('blob_content_language', None) + self.blob_content_disposition = kwargs.get('blob_content_disposition', None) + + +class BlobItemInternal(msrest.serialization.Model): + """An Azure Storage blob. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param deleted: Required. + :type deleted: bool + :param snapshot: Required. + :type snapshot: str + :param version_id: + :type version_id: str + :param is_current_version: + :type is_current_version: bool + :param properties: Required. Properties of a blob. + :type properties: ~azure.storage.blob.models.BlobPropertiesInternal + :param metadata: + :type metadata: ~azure.storage.blob.models.BlobMetadata + :param blob_tags: Blob tags. + :type blob_tags: ~azure.storage.blob.models.BlobTags + :param object_replication_metadata: Dictionary of :code:``. 
+ :type object_replication_metadata: dict[str, str] + """ + + _validation = { + 'name': {'required': True}, + 'deleted': {'required': True}, + 'snapshot': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'deleted': {'key': 'Deleted', 'type': 'bool'}, + 'snapshot': {'key': 'Snapshot', 'type': 'str'}, + 'version_id': {'key': 'VersionId', 'type': 'str'}, + 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, + 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, + 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'}, + 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'}, + 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__( + self, + **kwargs + ): + super(BlobItemInternal, self).__init__(**kwargs) + self.name = kwargs['name'] + self.deleted = kwargs['deleted'] + self.snapshot = kwargs['snapshot'] + self.version_id = kwargs.get('version_id', None) + self.is_current_version = kwargs.get('is_current_version', None) + self.properties = kwargs['properties'] + self.metadata = kwargs.get('metadata', None) + self.blob_tags = kwargs.get('blob_tags', None) + self.object_replication_metadata = kwargs.get('object_replication_metadata', None) + + +class BlobMetadata(msrest.serialization.Model): + """BlobMetadata. + + :param additional_properties: Unmatched properties from the message are deserialized to this + collection. + :type additional_properties: dict[str, str] + :param encrypted: + :type encrypted: str + """ + + _attribute_map = { + 'additional_properties': {'key': '', 'type': '{str}'}, + 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}}, + } + _xml_map = { + 'name': 'Metadata' + } + + def __init__( + self, + **kwargs + ): + super(BlobMetadata, self).__init__(**kwargs) + self.additional_properties = kwargs.get('additional_properties', None) + self.encrypted = kwargs.get('encrypted', None) + + +class BlobPrefix(msrest.serialization.Model): + """BlobPrefix. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(BlobPrefix, self).__init__(**kwargs) + self.name = kwargs['name'] + + +class BlobPropertiesInternal(msrest.serialization.Model): + """Properties of a blob. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: + :type creation_time: ~datetime.datetime + :param last_modified: Required. + :type last_modified: ~datetime.datetime + :param etag: Required. + :type etag: str + :param content_length: Size in bytes. + :type content_length: long + :param content_type: + :type content_type: str + :param content_encoding: + :type content_encoding: str + :param content_language: + :type content_language: str + :param content_md5: + :type content_md5: bytearray + :param content_disposition: + :type content_disposition: str + :param cache_control: + :type cache_control: str + :param blob_sequence_number: + :type blob_sequence_number: long + :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob". + :type blob_type: str or ~azure.storage.blob.models.BlobType + :param lease_status: Possible values include: "locked", "unlocked". 
+ :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :param lease_state: Possible values include: "available", "leased", "expired", "breaking", + "broken". + :type lease_state: str or ~azure.storage.blob.models.LeaseStateType + :param lease_duration: Possible values include: "infinite", "fixed". + :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :param copy_id: + :type copy_id: str + :param copy_status: Possible values include: "pending", "success", "aborted", "failed". + :type copy_status: str or ~azure.storage.blob.models.CopyStatusType + :param copy_source: + :type copy_source: str + :param copy_progress: + :type copy_progress: str + :param copy_completion_time: + :type copy_completion_time: ~datetime.datetime + :param copy_status_description: + :type copy_status_description: str + :param server_encrypted: + :type server_encrypted: bool + :param incremental_copy: + :type incremental_copy: bool + :param destination_snapshot: + :type destination_snapshot: str + :param deleted_time: + :type deleted_time: ~datetime.datetime + :param remaining_retention_days: + :type remaining_retention_days: int + :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40", + "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive". + :type access_tier: str or ~azure.storage.blob.models.AccessTier + :param access_tier_inferred: + :type access_tier_inferred: bool + :param archive_status: Possible values include: "rehydrate-pending-to-hot", "rehydrate- + pending-to-cool". + :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus + :param customer_provided_key_sha256: + :type customer_provided_key_sha256: str + :param encryption_scope: The name of the encryption scope under which the blob is encrypted. + :type encryption_scope: str + :param access_tier_change_time: + :type access_tier_change_time: ~datetime.datetime + :param tag_count: + :type tag_count: int + :param expires_on: + :type expires_on: ~datetime.datetime + :param is_sealed: + :type is_sealed: bool + :param rehydrate_priority: If an object is in rehydrate pending state then this header is + returned with priority of rehydrate. Valid values are High and Standard. Possible values + include: "High", "Standard". 
+ :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param last_accessed_on: + :type last_accessed_on: ~datetime.datetime + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + 'content_length': {'key': 'Content-Length', 'type': 'long'}, + 'content_type': {'key': 'Content-Type', 'type': 'str'}, + 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, + 'content_language': {'key': 'Content-Language', 'type': 'str'}, + 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, + 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, + 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, + 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, + 'blob_type': {'key': 'BlobType', 'type': 'str'}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, + 'lease_state': {'key': 'LeaseState', 'type': 'str'}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, + 'copy_id': {'key': 'CopyId', 'type': 'str'}, + 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, + 'copy_source': {'key': 'CopySource', 'type': 'str'}, + 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, + 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, + 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, + 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, + 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, + 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, + 'access_tier': {'key': 'AccessTier', 'type': 'str'}, + 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, + 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, + 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, + 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, + 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, + 'tag_count': {'key': 'TagCount', 'type': 'int'}, + 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, + 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, + 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, + 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, + } + _xml_map = { + 'name': 'Properties' + } + + def __init__( + self, + **kwargs + ): + super(BlobPropertiesInternal, self).__init__(**kwargs) + self.creation_time = kwargs.get('creation_time', None) + self.last_modified = kwargs['last_modified'] + self.etag = kwargs['etag'] + self.content_length = kwargs.get('content_length', None) + self.content_type = kwargs.get('content_type', None) + self.content_encoding = kwargs.get('content_encoding', None) + self.content_language = kwargs.get('content_language', None) + self.content_md5 = kwargs.get('content_md5', None) + self.content_disposition = kwargs.get('content_disposition', None) + self.cache_control = kwargs.get('cache_control', None) + self.blob_sequence_number = kwargs.get('blob_sequence_number', None) + self.blob_type = kwargs.get('blob_type', None) + self.lease_status = kwargs.get('lease_status', None) + self.lease_state = 
kwargs.get('lease_state', None) + self.lease_duration = kwargs.get('lease_duration', None) + self.copy_id = kwargs.get('copy_id', None) + self.copy_status = kwargs.get('copy_status', None) + self.copy_source = kwargs.get('copy_source', None) + self.copy_progress = kwargs.get('copy_progress', None) + self.copy_completion_time = kwargs.get('copy_completion_time', None) + self.copy_status_description = kwargs.get('copy_status_description', None) + self.server_encrypted = kwargs.get('server_encrypted', None) + self.incremental_copy = kwargs.get('incremental_copy', None) + self.destination_snapshot = kwargs.get('destination_snapshot', None) + self.deleted_time = kwargs.get('deleted_time', None) + self.remaining_retention_days = kwargs.get('remaining_retention_days', None) + self.access_tier = kwargs.get('access_tier', None) + self.access_tier_inferred = kwargs.get('access_tier_inferred', None) + self.archive_status = kwargs.get('archive_status', None) + self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) + self.encryption_scope = kwargs.get('encryption_scope', None) + self.access_tier_change_time = kwargs.get('access_tier_change_time', None) + self.tag_count = kwargs.get('tag_count', None) + self.expires_on = kwargs.get('expires_on', None) + self.is_sealed = kwargs.get('is_sealed', None) + self.rehydrate_priority = kwargs.get('rehydrate_priority', None) + self.last_accessed_on = kwargs.get('last_accessed_on', None) + + +class BlobTag(msrest.serialization.Model): + """BlobTag. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. + :type key: str + :param value: Required. + :type value: str + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'str'}, + } + _xml_map = { + 'name': 'Tag' + } + + def __init__( + self, + **kwargs + ): + super(BlobTag, self).__init__(**kwargs) + self.key = kwargs['key'] + self.value = kwargs['value'] + + +class BlobTags(msrest.serialization.Model): + """Blob tags. + + All required parameters must be populated in order to send to Azure. + + :param blob_tag_set: Required. + :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] + """ + + _validation = { + 'blob_tag_set': {'required': True}, + } + + _attribute_map = { + 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, + } + _xml_map = { + 'name': 'Tags' + } + + def __init__( + self, + **kwargs + ): + super(BlobTags, self).__init__(**kwargs) + self.blob_tag_set = kwargs['blob_tag_set'] + + +class Block(msrest.serialization.Model): + """Represents a single block in a block blob. It describes the block's ID and size. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The base64 encoded block ID. + :type name: str + :param size: Required. The block size in bytes. + :type size: int + """ + + _validation = { + 'name': {'required': True}, + 'size': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'size': {'key': 'Size', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(Block, self).__init__(**kwargs) + self.name = kwargs['name'] + self.size = kwargs['size'] + + +class BlockList(msrest.serialization.Model): + """BlockList. 
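    A small editorial sketch of consuming a deserialized instance (``block_list``
    is an assumed variable holding a ``BlockList``; either list may be None)::

        committed = block_list.committed_blocks or []
        total_committed_bytes = sum(block.size for block in committed)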
+ + :param committed_blocks: + :type committed_blocks: list[~azure.storage.blob.models.Block] + :param uncommitted_blocks: + :type uncommitted_blocks: list[~azure.storage.blob.models.Block] + """ + + _attribute_map = { + 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, + 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, + } + + def __init__( + self, + **kwargs + ): + super(BlockList, self).__init__(**kwargs) + self.committed_blocks = kwargs.get('committed_blocks', None) + self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None) + + +class BlockLookupList(msrest.serialization.Model): + """BlockLookupList. + + :param committed: + :type committed: list[str] + :param uncommitted: + :type uncommitted: list[str] + :param latest: + :type latest: list[str] + """ + + _attribute_map = { + 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, + 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, + 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, + } + _xml_map = { + 'name': 'BlockList' + } + + def __init__( + self, + **kwargs + ): + super(BlockLookupList, self).__init__(**kwargs) + self.committed = kwargs.get('committed', None) + self.uncommitted = kwargs.get('uncommitted', None) + self.latest = kwargs.get('latest', None) + + +class ClearRange(msrest.serialization.Model): + """ClearRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'ClearRange' + } + + def __init__( + self, + **kwargs + ): + super(ClearRange, self).__init__(**kwargs) + self.start = kwargs['start'] + self.end = kwargs['end'] + + +class ContainerCpkScopeInfo(msrest.serialization.Model): + """Parameter group. + + :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the + default encryption scope to set on the container and use for all future writes. + :type default_encryption_scope: str + :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true, + prevents any request from specifying a different encryption scope than the scope set on the + container. + :type prevent_encryption_scope_override: bool + """ + + _attribute_map = { + 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, + 'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(ContainerCpkScopeInfo, self).__init__(**kwargs) + self.default_encryption_scope = kwargs.get('default_encryption_scope', None) + self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) + + +class ContainerItem(msrest.serialization.Model): + """An Azure Storage container. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param deleted: + :type deleted: bool + :param version: + :type version: str + :param properties: Required. Properties of a container. 
+ :type properties: ~azure.storage.blob.models.ContainerProperties + :param metadata: Dictionary of :code:``. + :type metadata: dict[str, str] + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'deleted': {'key': 'Deleted', 'type': 'bool'}, + 'version': {'key': 'Version', 'type': 'str'}, + 'properties': {'key': 'Properties', 'type': 'ContainerProperties'}, + 'metadata': {'key': 'Metadata', 'type': '{str}'}, + } + _xml_map = { + 'name': 'Container' + } + + def __init__( + self, + **kwargs + ): + super(ContainerItem, self).__init__(**kwargs) + self.name = kwargs['name'] + self.deleted = kwargs.get('deleted', None) + self.version = kwargs.get('version', None) + self.properties = kwargs['properties'] + self.metadata = kwargs.get('metadata', None) + + +class ContainerProperties(msrest.serialization.Model): + """Properties of a container. + + All required parameters must be populated in order to send to Azure. + + :param last_modified: Required. + :type last_modified: ~datetime.datetime + :param etag: Required. + :type etag: str + :param lease_status: Possible values include: "locked", "unlocked". + :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :param lease_state: Possible values include: "available", "leased", "expired", "breaking", + "broken". + :type lease_state: str or ~azure.storage.blob.models.LeaseStateType + :param lease_duration: Possible values include: "infinite", "fixed". + :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :param public_access: Possible values include: "container", "blob". + :type public_access: str or ~azure.storage.blob.models.PublicAccessType + :param has_immutability_policy: + :type has_immutability_policy: bool + :param has_legal_hold: + :type has_legal_hold: bool + :param default_encryption_scope: + :type default_encryption_scope: str + :param prevent_encryption_scope_override: + :type prevent_encryption_scope_override: bool + :param deleted_time: + :type deleted_time: ~datetime.datetime + :param remaining_retention_days: + :type remaining_retention_days: int + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, + 'lease_state': {'key': 'LeaseState', 'type': 'str'}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, + 'public_access': {'key': 'PublicAccess', 'type': 'str'}, + 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'}, + 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'}, + 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, + 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(ContainerProperties, self).__init__(**kwargs) + self.last_modified = kwargs['last_modified'] + self.etag = kwargs['etag'] + self.lease_status = kwargs.get('lease_status', None) + self.lease_state = kwargs.get('lease_state', None) + self.lease_duration = kwargs.get('lease_duration', None) + self.public_access = kwargs.get('public_access', None) + self.has_immutability_policy = 
kwargs.get('has_immutability_policy', None)
+        self.has_legal_hold = kwargs.get('has_legal_hold', None)
+        self.default_encryption_scope = kwargs.get('default_encryption_scope', None)
+        self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None)
+        self.deleted_time = kwargs.get('deleted_time', None)
+        self.remaining_retention_days = kwargs.get('remaining_retention_days', None)
+
+
+class CorsRule(msrest.serialization.Model):
+    """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param allowed_origins: Required. The origin domains that are permitted to make a request
+     against the storage service via CORS. The origin domain is the domain from which the request
+     originates. Note that the origin must be an exact case-sensitive match with the origin that the
+     user agent sends to the service. You can also use the wildcard character '*' to allow all origin
+     domains to make requests via CORS.
+    :type allowed_origins: str
+    :param allowed_methods: Required. The methods (HTTP request verbs, comma separated) that the
+     origin domain may use for a CORS request.
+    :type allowed_methods: str
+    :param allowed_headers: Required. The request headers that the origin domain may specify on the
+     CORS request.
+    :type allowed_headers: str
+    :param exposed_headers: Required. The response headers that may be sent in the response to the
+     CORS request and exposed by the browser to the request issuer.
+    :type exposed_headers: str
+    :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the
+     preflight OPTIONS request.
+    :type max_age_in_seconds: int
+    """
+
+    _validation = {
+        'allowed_origins': {'required': True},
+        'allowed_methods': {'required': True},
+        'allowed_headers': {'required': True},
+        'exposed_headers': {'required': True},
+        'max_age_in_seconds': {'required': True, 'minimum': 0},
+    }
+
+    _attribute_map = {
+        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'},
+        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'},
+        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'},
+        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'},
+        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(CorsRule, self).__init__(**kwargs)
+        self.allowed_origins = kwargs['allowed_origins']
+        self.allowed_methods = kwargs['allowed_methods']
+        self.allowed_headers = kwargs['allowed_headers']
+        self.exposed_headers = kwargs['exposed_headers']
+        self.max_age_in_seconds = kwargs['max_age_in_seconds']
+
+
+class CpkInfo(msrest.serialization.Model):
+    """Parameter group.
+
+    :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data
+     provided in the request. If not specified, encryption is performed with the root account
+     encryption key. For more information, see Encryption at Rest for Azure Storage Services.
+    :type encryption_key: str
+    :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided
+     if the x-ms-encryption-key header is provided.
+ :type encryption_key_sha256: str + :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently, + the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is + provided. Possible values include: "None", "AES256". + :type encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType + """ + + _attribute_map = { + 'encryption_key': {'key': 'encryptionKey', 'type': 'str'}, + 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'}, + 'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(CpkInfo, self).__init__(**kwargs) + self.encryption_key = kwargs.get('encryption_key', None) + self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None) + self.encryption_algorithm = kwargs.get('encryption_algorithm', None) + + +class CpkScopeInfo(msrest.serialization.Model): + """Parameter group. + + :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the + encryption scope to use to encrypt the data provided in the request. If not specified, + encryption is performed with the default account encryption scope. For more information, see + Encryption at Rest for Azure Storage Services. + :type encryption_scope: str + """ + + _attribute_map = { + 'encryption_scope': {'key': 'encryptionScope', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(CpkScopeInfo, self).__init__(**kwargs) + self.encryption_scope = kwargs.get('encryption_scope', None) + + +class DataLakeStorageError(msrest.serialization.Model): + """DataLakeStorageError. + + :param data_lake_storage_error_details: The service error response object. + :type data_lake_storage_error_details: + ~azure.storage.blob.models.DataLakeStorageErrorAutoGenerated + """ + + _attribute_map = { + 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorAutoGenerated'}, + } + + def __init__( + self, + **kwargs + ): + super(DataLakeStorageError, self).__init__(**kwargs) + self.data_lake_storage_error_details = kwargs.get('data_lake_storage_error_details', None) + + +class DataLakeStorageErrorAutoGenerated(msrest.serialization.Model): + """The service error response object. + + :param code: The service error code. + :type code: str + :param message: The service error message. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'Code', 'type': 'str'}, + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(DataLakeStorageErrorAutoGenerated, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + + +class DelimitedTextConfiguration(msrest.serialization.Model): + """delimited text configuration. + + All required parameters must be populated in order to send to Azure. + + :param column_separator: Required. column separator. + :type column_separator: str + :param field_quote: Required. field quote. + :type field_quote: str + :param record_separator: Required. record separator. + :type record_separator: str + :param escape_char: Required. escape char. + :type escape_char: str + :param headers_present: Required. has headers. 
+ :type headers_present: bool + """ + + _validation = { + 'column_separator': {'required': True}, + 'field_quote': {'required': True}, + 'record_separator': {'required': True}, + 'escape_char': {'required': True}, + 'headers_present': {'required': True}, + } + + _attribute_map = { + 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, + 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, + 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, + } + _xml_map = { + 'name': 'DelimitedTextConfiguration' + } + + def __init__( + self, + **kwargs + ): + super(DelimitedTextConfiguration, self).__init__(**kwargs) + self.column_separator = kwargs['column_separator'] + self.field_quote = kwargs['field_quote'] + self.record_separator = kwargs['record_separator'] + self.escape_char = kwargs['escape_char'] + self.headers_present = kwargs['headers_present'] + + +class DirectoryHttpHeaders(msrest.serialization.Model): + """Parameter group. + + :param cache_control: Cache control for given resource. + :type cache_control: str + :param content_type: Content type for given resource. + :type content_type: str + :param content_encoding: Content encoding for given resource. + :type content_encoding: str + :param content_language: Content language for given resource. + :type content_language: str + :param content_disposition: Content disposition for given resource. + :type content_disposition: str + """ + + _attribute_map = { + 'cache_control': {'key': 'cacheControl', 'type': 'str'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, + 'content_language': {'key': 'contentLanguage', 'type': 'str'}, + 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(DirectoryHttpHeaders, self).__init__(**kwargs) + self.cache_control = kwargs.get('cache_control', None) + self.content_type = kwargs.get('content_type', None) + self.content_encoding = kwargs.get('content_encoding', None) + self.content_language = kwargs.get('content_language', None) + self.content_disposition = kwargs.get('content_disposition', None) + + +class FilterBlobItem(msrest.serialization.Model): + """Blob info from a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param container_name: Required. + :type container_name: str + :param tags: A set of tags. Blob tags. + :type tags: ~azure.storage.blob.models.BlobTags + """ + + _validation = { + 'name': {'required': True}, + 'container_name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'container_name': {'key': 'ContainerName', 'type': 'str'}, + 'tags': {'key': 'Tags', 'type': 'BlobTags'}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__( + self, + **kwargs + ): + super(FilterBlobItem, self).__init__(**kwargs) + self.name = kwargs['name'] + self.container_name = kwargs['container_name'] + self.tags = kwargs.get('tags', None) + + +class FilterBlobSegment(msrest.serialization.Model): + """The result of a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. 
+ + :param service_endpoint: Required. + :type service_endpoint: str + :param where: Required. + :type where: str + :param blobs: Required. + :type blobs: list[~azure.storage.blob.models.FilterBlobItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'where': {'required': True}, + 'blobs': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'where': {'key': 'Where', 'type': 'str'}, + 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(FilterBlobSegment, self).__init__(**kwargs) + self.service_endpoint = kwargs['service_endpoint'] + self.where = kwargs['where'] + self.blobs = kwargs['blobs'] + self.next_marker = kwargs.get('next_marker', None) + + +class GeoReplication(msrest.serialization.Model): + """Geo-Replication information for the Secondary Storage Service. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the secondary location. Possible values include: "live", + "bootstrap", "unavailable". + :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType + :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes + preceding this value are guaranteed to be available for read operations at the secondary. + Primary writes after this point in time may or may not be available for reads. + :type last_sync_time: ~datetime.datetime + """ + + _validation = { + 'status': {'required': True}, + 'last_sync_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'Status', 'type': 'str'}, + 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, + } + + def __init__( + self, + **kwargs + ): + super(GeoReplication, self).__init__(**kwargs) + self.status = kwargs['status'] + self.last_sync_time = kwargs['last_sync_time'] + + +class JsonTextConfiguration(msrest.serialization.Model): + """json text configuration. + + All required parameters must be populated in order to send to Azure. + + :param record_separator: Required. record separator. + :type record_separator: str + """ + + _validation = { + 'record_separator': {'required': True}, + } + + _attribute_map = { + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + } + _xml_map = { + 'name': 'JsonTextConfiguration' + } + + def __init__( + self, + **kwargs + ): + super(JsonTextConfiguration, self).__init__(**kwargs) + self.record_separator = kwargs['record_separator'] + + +class KeyInfo(msrest.serialization.Model): + """Key information. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The date-time the key is active in ISO 8601 UTC time. + :type start: str + :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. 
+ :type expiry: str + """ + + _validation = { + 'start': {'required': True}, + 'expiry': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str'}, + 'expiry': {'key': 'Expiry', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(KeyInfo, self).__init__(**kwargs) + self.start = kwargs['start'] + self.expiry = kwargs['expiry'] + + +class LeaseAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param lease_id: If specified, the operation only succeeds if the resource's lease is active + and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': 'leaseId', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = kwargs.get('lease_id', None) + + +class ListBlobsFlatSegmentResponse(msrest.serialization.Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param segment: Required. + :type segment: ~azure.storage.blob.models.BlobFlatListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs['service_endpoint'] + self.container_name = kwargs['container_name'] + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.segment = kwargs['segment'] + self.next_marker = kwargs.get('next_marker', None) + + +class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param delimiter: + :type delimiter: str + :param segment: Required. 
+ :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'delimiter': {'key': 'Delimiter', 'type': 'str'}, + 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs['service_endpoint'] + self.container_name = kwargs['container_name'] + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.delimiter = kwargs.get('delimiter', None) + self.segment = kwargs['segment'] + self.next_marker = kwargs.get('next_marker', None) + + +class ListContainersSegmentResponse(msrest.serialization.Model): + """An enumeration of containers. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param container_items: Required. + :type container_items: list[~azure.storage.blob.models.ContainerItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_items': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(ListContainersSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs['service_endpoint'] + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.container_items = kwargs['container_items'] + self.next_marker = kwargs.get('next_marker', None) + + +class Logging(msrest.serialization.Model): + """Azure Analytics Logging settings. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param delete: Required. Indicates whether all delete requests should be logged. + :type delete: bool + :param read: Required. Indicates whether all read requests should be logged. + :type read: bool + :param write: Required. Indicates whether all write requests should be logged. + :type write: bool + :param retention_policy: Required. 
the retention policy which determines how long the + associated data should persist. + :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'delete': {'required': True}, + 'read': {'required': True}, + 'write': {'required': True}, + 'retention_policy': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str'}, + 'delete': {'key': 'Delete', 'type': 'bool'}, + 'read': {'key': 'Read', 'type': 'bool'}, + 'write': {'key': 'Write', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + **kwargs + ): + super(Logging, self).__init__(**kwargs) + self.version = kwargs['version'] + self.delete = kwargs['delete'] + self.read = kwargs['read'] + self.write = kwargs['write'] + self.retention_policy = kwargs['retention_policy'] + + +class Metrics(msrest.serialization.Model): + """a summary of request statistics grouped by API in hour or minute aggregates for blobs. + + All required parameters must be populated in order to send to Azure. + + :param version: The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the Blob service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary statistics for called + API operations. + :type include_apis: bool + :param retention_policy: the retention policy which determines how long the associated data + should persist. + :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str'}, + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + **kwargs + ): + super(Metrics, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.enabled = kwargs['enabled'] + self.include_apis = kwargs.get('include_apis', None) + self.retention_policy = kwargs.get('retention_policy', None) + + +class ModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param if_modified_since: Specify this header value to operate only on a blob if it has been + modified since the specified date/time. + :type if_modified_since: ~datetime.datetime + :param if_unmodified_since: Specify this header value to operate only on a blob if it has not + been modified since the specified date/time. + :type if_unmodified_since: ~datetime.datetime + :param if_match: Specify an ETag value to operate only on blobs with a matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. + :type if_none_match: str + :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a + matching value. 
+ :type if_tags: str + """ + + _attribute_map = { + 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, + 'if_match': {'key': 'ifMatch', 'type': 'str'}, + 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, + 'if_tags': {'key': 'ifTags', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_tags = kwargs.get('if_tags', None) + + +class PageList(msrest.serialization.Model): + """the list of pages. + + :param page_range: + :type page_range: list[~azure.storage.blob.models.PageRange] + :param clear_range: + :type clear_range: list[~azure.storage.blob.models.ClearRange] + """ + + _attribute_map = { + 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, + 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, + } + + def __init__( + self, + **kwargs + ): + super(PageList, self).__init__(**kwargs) + self.page_range = kwargs.get('page_range', None) + self.clear_range = kwargs.get('clear_range', None) + + +class PageRange(msrest.serialization.Model): + """PageRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'PageRange' + } + + def __init__( + self, + **kwargs + ): + super(PageRange, self).__init__(**kwargs) + self.start = kwargs['start'] + self.end = kwargs['end'] + + +class QueryFormat(msrest.serialization.Model): + """QueryFormat. + + :param type: The quick query format type. Possible values include: "delimited", "json", + "arrow". + :type type: str or ~azure.storage.blob.models.QueryFormatType + :param delimited_text_configuration: delimited text configuration. + :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration + :param json_text_configuration: json text configuration. + :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration + :param arrow_configuration: arrow configuration. + :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration + """ + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, + 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, + 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, + 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, + } + + def __init__( + self, + **kwargs + ): + super(QueryFormat, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None) + self.json_text_configuration = kwargs.get('json_text_configuration', None) + self.arrow_configuration = kwargs.get('arrow_configuration', None) + + +class QueryRequest(msrest.serialization.Model): + """the quick query body. 
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar query_type: Required. the query type. Default value: "SQL".
+ :vartype query_type: str
+ :param expression: Required. a query statement.
+ :type expression: str
+ :param input_serialization:
+ :type input_serialization: ~azure.storage.blob.models.QuerySerialization
+ :param output_serialization:
+ :type output_serialization: ~azure.storage.blob.models.QuerySerialization
+ """
+
+ _validation = {
+ 'query_type': {'required': True, 'constant': True},
+ 'expression': {'required': True},
+ }
+
+ _attribute_map = {
+ 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}},
+ 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}},
+ 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'},
+ 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'},
+ }
+ _xml_map = {
+ 'name': 'QueryRequest'
+ }
+
+ query_type = "SQL"
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(QueryRequest, self).__init__(**kwargs)
+ self.expression = kwargs['expression']
+ self.input_serialization = kwargs.get('input_serialization', None)
+ self.output_serialization = kwargs.get('output_serialization', None)
+
+
+class QuerySerialization(msrest.serialization.Model):
+ """QuerySerialization.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param format: Required.
+ :type format: ~azure.storage.blob.models.QueryFormat
+ """
+
+ _validation = {
+ 'format': {'required': True},
+ }
+
+ _attribute_map = {
+ 'format': {'key': 'Format', 'type': 'QueryFormat'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(QuerySerialization, self).__init__(**kwargs)
+ self.format = kwargs['format']
+
+
+class RetentionPolicy(msrest.serialization.Model):
+ """the retention policy which determines how long the associated data should persist.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param enabled: Required. Indicates whether a retention policy is enabled for the storage
+ service.
+ :type enabled: bool
+ :param days: Indicates the number of days that metrics or logging or soft-deleted data should
+ be retained. All data older than this value will be deleted.
+ :type days: int
+ :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage
+ account.
+ :type allow_permanent_delete: bool
+ """
+
+ _validation = {
+ 'enabled': {'required': True},
+ 'days': {'minimum': 1},
+ }
+
+ _attribute_map = {
+ 'enabled': {'key': 'Enabled', 'type': 'bool'},
+ 'days': {'key': 'Days', 'type': 'int'},
+ 'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(RetentionPolicy, self).__init__(**kwargs)
+ self.enabled = kwargs['enabled']
+ self.days = kwargs.get('days', None)
+ self.allow_permanent_delete = kwargs.get('allow_permanent_delete', None)
+
+
+class SequenceNumberAccessConditions(msrest.serialization.Model):
+ """Parameter group.
+
+ :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a
+ blob if it has a sequence number less than or equal to the specified value.
+ :type if_sequence_number_less_than_or_equal_to: long
+ :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it
+ has a sequence number less than the specified value.
+ :type if_sequence_number_less_than: long + :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it + has the specified sequence number. + :type if_sequence_number_equal_to: long + """ + + _attribute_map = { + 'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'}, + 'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'}, + 'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(SequenceNumberAccessConditions, self).__init__(**kwargs) + self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None) + self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None) + self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None) + + +class SignedIdentifier(msrest.serialization.Model): + """signed identifier. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. a unique id. + :type id: str + :param access_policy: An Access policy. + :type access_policy: ~azure.storage.blob.models.AccessPolicy + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, + } + _xml_map = { + 'name': 'SignedIdentifier' + } + + def __init__( + self, + **kwargs + ): + super(SignedIdentifier, self).__init__(**kwargs) + self.id = kwargs['id'] + self.access_policy = kwargs.get('access_policy', None) + + +class SourceModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param source_if_modified_since: Specify this header value to operate only on a blob if it has + been modified since the specified date/time. + :type source_if_modified_since: ~datetime.datetime + :param source_if_unmodified_since: Specify this header value to operate only on a blob if it + has not been modified since the specified date/time. + :type source_if_unmodified_since: ~datetime.datetime + :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. + :type source_if_match: str + :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching + value. + :type source_if_none_match: str + :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a + matching value. + :type source_if_tags: str + """ + + _attribute_map = { + 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, + 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, + 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, + 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, + 'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SourceModifiedAccessConditions, self).__init__(**kwargs) + self.source_if_modified_since = kwargs.get('source_if_modified_since', None) + self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) + self.source_if_match = kwargs.get('source_if_match', None) + self.source_if_none_match = kwargs.get('source_if_none_match', None) + self.source_if_tags = kwargs.get('source_if_tags', None) + + +class StaticWebsite(msrest.serialization.Model): + """The properties that enable an account to host a static website. 
+ + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Indicates whether this account is hosting a static website. + :type enabled: bool + :param index_document: The default name of the index page under each directory. + :type index_document: str + :param error_document404_path: The absolute path of the custom 404 page. + :type error_document404_path: str + :param default_index_document_path: Absolute path of the default index page. + :type default_index_document_path: str + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'index_document': {'key': 'IndexDocument', 'type': 'str'}, + 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, + 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(StaticWebsite, self).__init__(**kwargs) + self.enabled = kwargs['enabled'] + self.index_document = kwargs.get('index_document', None) + self.error_document404_path = kwargs.get('error_document404_path', None) + self.default_index_document_path = kwargs.get('default_index_document_path', None) + + +class StorageError(msrest.serialization.Model): + """StorageError. + + :param message: + :type message: str + """ + + _attribute_map = { + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageError, self).__init__(**kwargs) + self.message = kwargs.get('message', None) + + +class StorageServiceProperties(msrest.serialization.Model): + """Storage Service Properties. + + :param logging: Azure Analytics Logging settings. + :type logging: ~azure.storage.blob.models.Logging + :param hour_metrics: a summary of request statistics grouped by API in hour or minute + aggregates for blobs. + :type hour_metrics: ~azure.storage.blob.models.Metrics + :param minute_metrics: a summary of request statistics grouped by API in hour or minute + aggregates for blobs. + :type minute_metrics: ~azure.storage.blob.models.Metrics + :param cors: The set of CORS rules. + :type cors: list[~azure.storage.blob.models.CorsRule] + :param default_service_version: The default version to use for requests to the Blob service if + an incoming request's version is not specified. Possible values include version 2008-10-27 and + all more recent versions. + :type default_service_version: str + :param delete_retention_policy: the retention policy which determines how long the associated + data should persist. + :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy + :param static_website: The properties that enable an account to host a static website. 
+ :type static_website: ~azure.storage.blob.models.StaticWebsite + """ + + _attribute_map = { + 'logging': {'key': 'Logging', 'type': 'Logging'}, + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, + 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, + 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, + 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageServiceProperties, self).__init__(**kwargs) + self.logging = kwargs.get('logging', None) + self.hour_metrics = kwargs.get('hour_metrics', None) + self.minute_metrics = kwargs.get('minute_metrics', None) + self.cors = kwargs.get('cors', None) + self.default_service_version = kwargs.get('default_service_version', None) + self.delete_retention_policy = kwargs.get('delete_retention_policy', None) + self.static_website = kwargs.get('static_website', None) + + +class StorageServiceStats(msrest.serialization.Model): + """Stats for the storage service. + + :param geo_replication: Geo-Replication information for the Secondary Storage Service. + :type geo_replication: ~azure.storage.blob.models.GeoReplication + """ + + _attribute_map = { + 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageServiceStats, self).__init__(**kwargs) + self.geo_replication = kwargs.get('geo_replication', None) + + +class UserDelegationKey(msrest.serialization.Model): + """A user delegation key. + + All required parameters must be populated in order to send to Azure. + + :param signed_oid: Required. The Azure Active Directory object ID in GUID format. + :type signed_oid: str + :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. + :type signed_tid: str + :param signed_start: Required. The date-time the key is active. + :type signed_start: ~datetime.datetime + :param signed_expiry: Required. The date-time the key expires. + :type signed_expiry: ~datetime.datetime + :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the + key. + :type signed_service: str + :param signed_version: Required. The service version that created the key. + :type signed_version: str + :param value: Required. The key as a base64 string. 
+ :type value: str + """ + + _validation = { + 'signed_oid': {'required': True}, + 'signed_tid': {'required': True}, + 'signed_start': {'required': True}, + 'signed_expiry': {'required': True}, + 'signed_service': {'required': True}, + 'signed_version': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, + 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, + 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, + 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, + 'signed_service': {'key': 'SignedService', 'type': 'str'}, + 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(UserDelegationKey, self).__init__(**kwargs) + self.signed_oid = kwargs['signed_oid'] + self.signed_tid = kwargs['signed_tid'] + self.signed_start = kwargs['signed_start'] + self.signed_expiry = kwargs['signed_expiry'] + self.signed_service = kwargs['signed_service'] + self.signed_version = kwargs['signed_version'] + self.value = kwargs['value'] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models_py3.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models_py3.py new file mode 100644 index 0000000..2ed0d23 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/models/_models_py3.py @@ -0,0 +1,2303 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import Dict, List, Optional, Union + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + +from ._azure_blob_storage_enums import * + + +class AccessPolicy(msrest.serialization.Model): + """An Access policy. + + :param start: the date-time the policy is active. + :type start: str + :param expiry: the date-time the policy expires. + :type expiry: str + :param permission: the permissions for the acl policy. + :type permission: str + """ + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str'}, + 'expiry': {'key': 'Expiry', 'type': 'str'}, + 'permission': {'key': 'Permission', 'type': 'str'}, + } + + def __init__( + self, + *, + start: Optional[str] = None, + expiry: Optional[str] = None, + permission: Optional[str] = None, + **kwargs + ): + super(AccessPolicy, self).__init__(**kwargs) + self.start = start + self.expiry = expiry + self.permission = permission + + +class AppendPositionAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param max_size: Optional conditional header. The max length in bytes permitted for the append + blob. If the Append Block operation would cause the blob to exceed that limit or if the blob + size is already greater than the value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :type max_size: long + :param append_position: Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. 
Append Block will succeed only if the append + position is equal to this number. If it is not, the request will fail with the + AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + :type append_position: long + """ + + _attribute_map = { + 'max_size': {'key': 'maxSize', 'type': 'long'}, + 'append_position': {'key': 'appendPosition', 'type': 'long'}, + } + + def __init__( + self, + *, + max_size: Optional[int] = None, + append_position: Optional[int] = None, + **kwargs + ): + super(AppendPositionAccessConditions, self).__init__(**kwargs) + self.max_size = max_size + self.append_position = append_position + + +class ArrowConfiguration(msrest.serialization.Model): + """arrow configuration. + + All required parameters must be populated in order to send to Azure. + + :param schema: Required. + :type schema: list[~azure.storage.blob.models.ArrowField] + """ + + _validation = { + 'schema': {'required': True}, + } + + _attribute_map = { + 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, + } + _xml_map = { + 'name': 'ArrowConfiguration' + } + + def __init__( + self, + *, + schema: List["ArrowField"], + **kwargs + ): + super(ArrowConfiguration, self).__init__(**kwargs) + self.schema = schema + + +class ArrowField(msrest.serialization.Model): + """field of an arrow schema. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. + :type type: str + :param name: + :type name: str + :param precision: + :type precision: int + :param scale: + :type scale: int + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'precision': {'key': 'Precision', 'type': 'int'}, + 'scale': {'key': 'Scale', 'type': 'int'}, + } + _xml_map = { + 'name': 'Field' + } + + def __init__( + self, + *, + type: str, + name: Optional[str] = None, + precision: Optional[int] = None, + scale: Optional[int] = None, + **kwargs + ): + super(ArrowField, self).__init__(**kwargs) + self.type = type + self.name = name + self.precision = precision + self.scale = scale + + +class BlobFlatListSegment(msrest.serialization.Model): + """BlobFlatListSegment. + + All required parameters must be populated in order to send to Azure. + + :param blob_items: Required. + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__( + self, + *, + blob_items: List["BlobItemInternal"], + **kwargs + ): + super(BlobFlatListSegment, self).__init__(**kwargs) + self.blob_items = blob_items + + +class BlobHierarchyListSegment(msrest.serialization.Model): + """BlobHierarchyListSegment. + + All required parameters must be populated in order to send to Azure. + + :param blob_prefixes: + :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] + :param blob_items: Required. 
+ :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__( + self, + *, + blob_items: List["BlobItemInternal"], + blob_prefixes: Optional[List["BlobPrefix"]] = None, + **kwargs + ): + super(BlobHierarchyListSegment, self).__init__(**kwargs) + self.blob_prefixes = blob_prefixes + self.blob_items = blob_items + + +class BlobHTTPHeaders(msrest.serialization.Model): + """Parameter group. + + :param blob_cache_control: Optional. Sets the blob's cache control. If specified, this property + is stored with the blob and returned with a read request. + :type blob_cache_control: str + :param blob_content_type: Optional. Sets the blob's content type. If specified, this property + is stored with the blob and returned with a read request. + :type blob_content_type: str + :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not + validated, as the hashes for the individual blocks were validated when each was uploaded. + :type blob_content_md5: bytearray + :param blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this + property is stored with the blob and returned with a read request. + :type blob_content_encoding: str + :param blob_content_language: Optional. Set the blob's content language. If specified, this + property is stored with the blob and returned with a read request. + :type blob_content_language: str + :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header. + :type blob_content_disposition: str + """ + + _attribute_map = { + 'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'}, + 'blob_content_type': {'key': 'blobContentType', 'type': 'str'}, + 'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'}, + 'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'}, + 'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'}, + 'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'}, + } + + def __init__( + self, + *, + blob_cache_control: Optional[str] = None, + blob_content_type: Optional[str] = None, + blob_content_md5: Optional[bytearray] = None, + blob_content_encoding: Optional[str] = None, + blob_content_language: Optional[str] = None, + blob_content_disposition: Optional[str] = None, + **kwargs + ): + super(BlobHTTPHeaders, self).__init__(**kwargs) + self.blob_cache_control = blob_cache_control + self.blob_content_type = blob_content_type + self.blob_content_md5 = blob_content_md5 + self.blob_content_encoding = blob_content_encoding + self.blob_content_language = blob_content_language + self.blob_content_disposition = blob_content_disposition + + +class BlobItemInternal(msrest.serialization.Model): + """An Azure Storage blob. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param deleted: Required. + :type deleted: bool + :param snapshot: Required. + :type snapshot: str + :param version_id: + :type version_id: str + :param is_current_version: + :type is_current_version: bool + :param properties: Required. Properties of a blob. 
+ :type properties: ~azure.storage.blob.models.BlobPropertiesInternal
+ :param metadata:
+ :type metadata: ~azure.storage.blob.models.BlobMetadata
+ :param blob_tags: Blob tags.
+ :type blob_tags: ~azure.storage.blob.models.BlobTags
+ :param object_replication_metadata: Dictionary of :code:`<string>`.
+ :type object_replication_metadata: dict[str, str]
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'deleted': {'required': True},
+ 'snapshot': {'required': True},
+ 'properties': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str'},
+ 'deleted': {'key': 'Deleted', 'type': 'bool'},
+ 'snapshot': {'key': 'Snapshot', 'type': 'str'},
+ 'version_id': {'key': 'VersionId', 'type': 'str'},
+ 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'},
+ 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'},
+ 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'},
+ 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'},
+ 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'},
+ }
+ _xml_map = {
+ 'name': 'Blob'
+ }
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ deleted: bool,
+ snapshot: str,
+ properties: "BlobPropertiesInternal",
+ version_id: Optional[str] = None,
+ is_current_version: Optional[bool] = None,
+ metadata: Optional["BlobMetadata"] = None,
+ blob_tags: Optional["BlobTags"] = None,
+ object_replication_metadata: Optional[Dict[str, str]] = None,
+ **kwargs
+ ):
+ super(BlobItemInternal, self).__init__(**kwargs)
+ self.name = name
+ self.deleted = deleted
+ self.snapshot = snapshot
+ self.version_id = version_id
+ self.is_current_version = is_current_version
+ self.properties = properties
+ self.metadata = metadata
+ self.blob_tags = blob_tags
+ self.object_replication_metadata = object_replication_metadata
+
+
+class BlobMetadata(msrest.serialization.Model):
+ """BlobMetadata.
+
+ :param additional_properties: Unmatched properties from the message are deserialized to this
+ collection.
+ :type additional_properties: dict[str, str]
+ :param encrypted:
+ :type encrypted: str
+ """
+
+ _attribute_map = {
+ 'additional_properties': {'key': '', 'type': '{str}'},
+ 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}},
+ }
+ _xml_map = {
+ 'name': 'Metadata'
+ }
+
+ def __init__(
+ self,
+ *,
+ additional_properties: Optional[Dict[str, str]] = None,
+ encrypted: Optional[str] = None,
+ **kwargs
+ ):
+ super(BlobMetadata, self).__init__(**kwargs)
+ self.additional_properties = additional_properties
+ self.encrypted = encrypted
+
+
+class BlobPrefix(msrest.serialization.Model):
+ """BlobPrefix.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ **kwargs
+ ):
+ super(BlobPrefix, self).__init__(**kwargs)
+ self.name = name
+
+
+class BlobPropertiesInternal(msrest.serialization.Model):
+ """Properties of a blob.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param creation_time:
+ :type creation_time: ~datetime.datetime
+ :param last_modified: Required.
+ :type last_modified: ~datetime.datetime
+ :param etag: Required.
+ :type etag: str
+ :param content_length: Size in bytes.
+ :type content_length: long
+ :param content_type:
+ :type content_type: str
+ :param content_encoding:
+ :type content_encoding: str
+ :param content_language:
+ :type content_language: str
+ :param content_md5:
+ :type content_md5: bytearray
+ :param content_disposition:
+ :type content_disposition: str
+ :param cache_control:
+ :type cache_control: str
+ :param blob_sequence_number:
+ :type blob_sequence_number: long
+ :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob".
+ :type blob_type: str or ~azure.storage.blob.models.BlobType
+ :param lease_status: Possible values include: "locked", "unlocked".
+ :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+ :param lease_state: Possible values include: "available", "leased", "expired", "breaking",
+ "broken".
+ :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
+ :param lease_duration: Possible values include: "infinite", "fixed".
+ :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+ :param copy_id:
+ :type copy_id: str
+ :param copy_status: Possible values include: "pending", "success", "aborted", "failed".
+ :type copy_status: str or ~azure.storage.blob.models.CopyStatusType
+ :param copy_source:
+ :type copy_source: str
+ :param copy_progress:
+ :type copy_progress: str
+ :param copy_completion_time:
+ :type copy_completion_time: ~datetime.datetime
+ :param copy_status_description:
+ :type copy_status_description: str
+ :param server_encrypted:
+ :type server_encrypted: bool
+ :param incremental_copy:
+ :type incremental_copy: bool
+ :param destination_snapshot:
+ :type destination_snapshot: str
+ :param deleted_time:
+ :type deleted_time: ~datetime.datetime
+ :param remaining_retention_days:
+ :type remaining_retention_days: int
+ :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40",
+ "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive".
+ :type access_tier: str or ~azure.storage.blob.models.AccessTier
+ :param access_tier_inferred:
+ :type access_tier_inferred: bool
+ :param archive_status: Possible values include: "rehydrate-pending-to-hot",
+ "rehydrate-pending-to-cool".
+ :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus
+ :param customer_provided_key_sha256:
+ :type customer_provided_key_sha256: str
+ :param encryption_scope: The name of the encryption scope under which the blob is encrypted.
+ :type encryption_scope: str
+ :param access_tier_change_time:
+ :type access_tier_change_time: ~datetime.datetime
+ :param tag_count:
+ :type tag_count: int
+ :param expires_on:
+ :type expires_on: ~datetime.datetime
+ :param is_sealed:
+ :type is_sealed: bool
+ :param rehydrate_priority: If an object is in rehydrate pending state then this header is
+ returned with priority of rehydrate. Valid values are High and Standard. Possible values
+ include: "High", "Standard".
+ :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param last_accessed_on: + :type last_accessed_on: ~datetime.datetime + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + 'content_length': {'key': 'Content-Length', 'type': 'long'}, + 'content_type': {'key': 'Content-Type', 'type': 'str'}, + 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, + 'content_language': {'key': 'Content-Language', 'type': 'str'}, + 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, + 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, + 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, + 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, + 'blob_type': {'key': 'BlobType', 'type': 'str'}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, + 'lease_state': {'key': 'LeaseState', 'type': 'str'}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, + 'copy_id': {'key': 'CopyId', 'type': 'str'}, + 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, + 'copy_source': {'key': 'CopySource', 'type': 'str'}, + 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, + 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, + 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, + 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, + 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, + 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, + 'access_tier': {'key': 'AccessTier', 'type': 'str'}, + 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, + 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, + 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, + 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, + 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, + 'tag_count': {'key': 'TagCount', 'type': 'int'}, + 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, + 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, + 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, + 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, + } + _xml_map = { + 'name': 'Properties' + } + + def __init__( + self, + *, + last_modified: datetime.datetime, + etag: str, + creation_time: Optional[datetime.datetime] = None, + content_length: Optional[int] = None, + content_type: Optional[str] = None, + content_encoding: Optional[str] = None, + content_language: Optional[str] = None, + content_md5: Optional[bytearray] = None, + content_disposition: Optional[str] = None, + cache_control: Optional[str] = None, + blob_sequence_number: Optional[int] = None, + blob_type: Optional[Union[str, "BlobType"]] = None, + lease_status: Optional[Union[str, "LeaseStatusType"]] = None, + lease_state: Optional[Union[str, "LeaseStateType"]] = None, + lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, + copy_id: Optional[str] = None, + copy_status: Optional[Union[str, "CopyStatusType"]] = None, + copy_source: Optional[str] = None, + copy_progress: 
Optional[str] = None, + copy_completion_time: Optional[datetime.datetime] = None, + copy_status_description: Optional[str] = None, + server_encrypted: Optional[bool] = None, + incremental_copy: Optional[bool] = None, + destination_snapshot: Optional[str] = None, + deleted_time: Optional[datetime.datetime] = None, + remaining_retention_days: Optional[int] = None, + access_tier: Optional[Union[str, "AccessTier"]] = None, + access_tier_inferred: Optional[bool] = None, + archive_status: Optional[Union[str, "ArchiveStatus"]] = None, + customer_provided_key_sha256: Optional[str] = None, + encryption_scope: Optional[str] = None, + access_tier_change_time: Optional[datetime.datetime] = None, + tag_count: Optional[int] = None, + expires_on: Optional[datetime.datetime] = None, + is_sealed: Optional[bool] = None, + rehydrate_priority: Optional[Union[str, "RehydratePriority"]] = None, + last_accessed_on: Optional[datetime.datetime] = None, + **kwargs + ): + super(BlobPropertiesInternal, self).__init__(**kwargs) + self.creation_time = creation_time + self.last_modified = last_modified + self.etag = etag + self.content_length = content_length + self.content_type = content_type + self.content_encoding = content_encoding + self.content_language = content_language + self.content_md5 = content_md5 + self.content_disposition = content_disposition + self.cache_control = cache_control + self.blob_sequence_number = blob_sequence_number + self.blob_type = blob_type + self.lease_status = lease_status + self.lease_state = lease_state + self.lease_duration = lease_duration + self.copy_id = copy_id + self.copy_status = copy_status + self.copy_source = copy_source + self.copy_progress = copy_progress + self.copy_completion_time = copy_completion_time + self.copy_status_description = copy_status_description + self.server_encrypted = server_encrypted + self.incremental_copy = incremental_copy + self.destination_snapshot = destination_snapshot + self.deleted_time = deleted_time + self.remaining_retention_days = remaining_retention_days + self.access_tier = access_tier + self.access_tier_inferred = access_tier_inferred + self.archive_status = archive_status + self.customer_provided_key_sha256 = customer_provided_key_sha256 + self.encryption_scope = encryption_scope + self.access_tier_change_time = access_tier_change_time + self.tag_count = tag_count + self.expires_on = expires_on + self.is_sealed = is_sealed + self.rehydrate_priority = rehydrate_priority + self.last_accessed_on = last_accessed_on + + +class BlobTag(msrest.serialization.Model): + """BlobTag. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. + :type key: str + :param value: Required. + :type value: str + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'str'}, + } + _xml_map = { + 'name': 'Tag' + } + + def __init__( + self, + *, + key: str, + value: str, + **kwargs + ): + super(BlobTag, self).__init__(**kwargs) + self.key = key + self.value = value + + +class BlobTags(msrest.serialization.Model): + """Blob tags. + + All required parameters must be populated in order to send to Azure. + + :param blob_tag_set: Required. 
+ :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] + """ + + _validation = { + 'blob_tag_set': {'required': True}, + } + + _attribute_map = { + 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, + } + _xml_map = { + 'name': 'Tags' + } + + def __init__( + self, + *, + blob_tag_set: List["BlobTag"], + **kwargs + ): + super(BlobTags, self).__init__(**kwargs) + self.blob_tag_set = blob_tag_set + + +class Block(msrest.serialization.Model): + """Represents a single block in a block blob. It describes the block's ID and size. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The base64 encoded block ID. + :type name: str + :param size: Required. The block size in bytes. + :type size: int + """ + + _validation = { + 'name': {'required': True}, + 'size': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'size': {'key': 'Size', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + size: int, + **kwargs + ): + super(Block, self).__init__(**kwargs) + self.name = name + self.size = size + + +class BlockList(msrest.serialization.Model): + """BlockList. + + :param committed_blocks: + :type committed_blocks: list[~azure.storage.blob.models.Block] + :param uncommitted_blocks: + :type uncommitted_blocks: list[~azure.storage.blob.models.Block] + """ + + _attribute_map = { + 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, + 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, + } + + def __init__( + self, + *, + committed_blocks: Optional[List["Block"]] = None, + uncommitted_blocks: Optional[List["Block"]] = None, + **kwargs + ): + super(BlockList, self).__init__(**kwargs) + self.committed_blocks = committed_blocks + self.uncommitted_blocks = uncommitted_blocks + + +class BlockLookupList(msrest.serialization.Model): + """BlockLookupList. + + :param committed: + :type committed: list[str] + :param uncommitted: + :type uncommitted: list[str] + :param latest: + :type latest: list[str] + """ + + _attribute_map = { + 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, + 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, + 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, + } + _xml_map = { + 'name': 'BlockList' + } + + def __init__( + self, + *, + committed: Optional[List[str]] = None, + uncommitted: Optional[List[str]] = None, + latest: Optional[List[str]] = None, + **kwargs + ): + super(BlockLookupList, self).__init__(**kwargs) + self.committed = committed + self.uncommitted = uncommitted + self.latest = latest + + +class ClearRange(msrest.serialization.Model): + """ClearRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. 
+    :type end: long
+    """
+
+    _validation = {
+        'start': {'required': True},
+        'end': {'required': True},
+    }
+
+    _attribute_map = {
+        'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
+        'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
+    }
+    _xml_map = {
+        'name': 'ClearRange'
+    }
+
+    def __init__(
+        self,
+        *,
+        start: int,
+        end: int,
+        **kwargs
+    ):
+        super(ClearRange, self).__init__(**kwargs)
+        self.start = start
+        self.end = end
+
+
+class ContainerCpkScopeInfo(msrest.serialization.Model):
+    """Parameter group.
+
+    :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the
+     default encryption scope to set on the container and use for all future writes.
+    :type default_encryption_scope: str
+    :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and later. If true,
+     prevents any request from specifying a different encryption scope than the scope set on the
+     container.
+    :type prevent_encryption_scope_override: bool
+    """
+
+    _attribute_map = {
+        'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'},
+        'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'},
+    }
+
+    def __init__(
+        self,
+        *,
+        default_encryption_scope: Optional[str] = None,
+        prevent_encryption_scope_override: Optional[bool] = None,
+        **kwargs
+    ):
+        super(ContainerCpkScopeInfo, self).__init__(**kwargs)
+        self.default_encryption_scope = default_encryption_scope
+        self.prevent_encryption_scope_override = prevent_encryption_scope_override
+
+
+class ContainerItem(msrest.serialization.Model):
+    """An Azure Storage container.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param name: Required.
+    :type name: str
+    :param deleted:
+    :type deleted: bool
+    :param version:
+    :type version: str
+    :param properties: Required. Properties of a container.
+    :type properties: ~azure.storage.blob.models.ContainerProperties
+    :param metadata: Dictionary of metadata name-value pairs.
+    :type metadata: dict[str, str]
+    """
+
+    _validation = {
+        'name': {'required': True},
+        'properties': {'required': True},
+    }
+
+    _attribute_map = {
+        'name': {'key': 'Name', 'type': 'str'},
+        'deleted': {'key': 'Deleted', 'type': 'bool'},
+        'version': {'key': 'Version', 'type': 'str'},
+        'properties': {'key': 'Properties', 'type': 'ContainerProperties'},
+        'metadata': {'key': 'Metadata', 'type': '{str}'},
+    }
+    _xml_map = {
+        'name': 'Container'
+    }
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        properties: "ContainerProperties",
+        deleted: Optional[bool] = None,
+        version: Optional[str] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs
+    ):
+        super(ContainerItem, self).__init__(**kwargs)
+        self.name = name
+        self.deleted = deleted
+        self.version = version
+        self.properties = properties
+        self.metadata = metadata
+
+
+class ContainerProperties(msrest.serialization.Model):
+    """Properties of a container.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param last_modified: Required.
+    :type last_modified: ~datetime.datetime
+    :param etag: Required.
+    :type etag: str
+    :param lease_status: Possible values include: "locked", "unlocked".
+    :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+    :param lease_state: Possible values include: "available", "leased", "expired", "breaking",
+     "broken".
+    :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
+    :param lease_duration: Possible values include: "infinite", "fixed".
+ :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :param public_access: Possible values include: "container", "blob". + :type public_access: str or ~azure.storage.blob.models.PublicAccessType + :param has_immutability_policy: + :type has_immutability_policy: bool + :param has_legal_hold: + :type has_legal_hold: bool + :param default_encryption_scope: + :type default_encryption_scope: str + :param prevent_encryption_scope_override: + :type prevent_encryption_scope_override: bool + :param deleted_time: + :type deleted_time: ~datetime.datetime + :param remaining_retention_days: + :type remaining_retention_days: int + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, + 'lease_state': {'key': 'LeaseState', 'type': 'str'}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, + 'public_access': {'key': 'PublicAccess', 'type': 'str'}, + 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'}, + 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'}, + 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'}, + 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, + } + + def __init__( + self, + *, + last_modified: datetime.datetime, + etag: str, + lease_status: Optional[Union[str, "LeaseStatusType"]] = None, + lease_state: Optional[Union[str, "LeaseStateType"]] = None, + lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, + public_access: Optional[Union[str, "PublicAccessType"]] = None, + has_immutability_policy: Optional[bool] = None, + has_legal_hold: Optional[bool] = None, + default_encryption_scope: Optional[str] = None, + prevent_encryption_scope_override: Optional[bool] = None, + deleted_time: Optional[datetime.datetime] = None, + remaining_retention_days: Optional[int] = None, + **kwargs + ): + super(ContainerProperties, self).__init__(**kwargs) + self.last_modified = last_modified + self.etag = etag + self.lease_status = lease_status + self.lease_state = lease_state + self.lease_duration = lease_duration + self.public_access = public_access + self.has_immutability_policy = has_immutability_policy + self.has_legal_hold = has_legal_hold + self.default_encryption_scope = default_encryption_scope + self.prevent_encryption_scope_override = prevent_encryption_scope_override + self.deleted_time = deleted_time + self.remaining_retention_days = remaining_retention_days + + +class CorsRule(msrest.serialization.Model): + """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. + + All required parameters must be populated in order to send to Azure. + + :param allowed_origins: Required. The origin domains that are permitted to make a request + against the storage service via CORS. The origin domain is the domain from which the request + originates. 
Note that the origin must be an exact case-sensitive match with the origin that the
+     user agent sends to the service. You can also use the wildcard character '*' to allow all origin
+     domains to make requests via CORS.
+    :type allowed_origins: str
+    :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may
+     use for a CORS request. (comma separated).
+    :type allowed_methods: str
+    :param allowed_headers: Required. The request headers that the origin domain may specify on the
+     CORS request.
+    :type allowed_headers: str
+    :param exposed_headers: Required. The response headers that may be sent in the response to the
+     CORS request and exposed by the browser to the request issuer.
+    :type exposed_headers: str
+    :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the
+     preflight OPTIONS request.
+    :type max_age_in_seconds: int
+    """
+
+    _validation = {
+        'allowed_origins': {'required': True},
+        'allowed_methods': {'required': True},
+        'allowed_headers': {'required': True},
+        'exposed_headers': {'required': True},
+        'max_age_in_seconds': {'required': True, 'minimum': 0},
+    }
+
+    _attribute_map = {
+        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'},
+        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'},
+        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'},
+        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'},
+        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'},
+    }
+
+    def __init__(
+        self,
+        *,
+        allowed_origins: str,
+        allowed_methods: str,
+        allowed_headers: str,
+        exposed_headers: str,
+        max_age_in_seconds: int,
+        **kwargs
+    ):
+        super(CorsRule, self).__init__(**kwargs)
+        self.allowed_origins = allowed_origins
+        self.allowed_methods = allowed_methods
+        self.allowed_headers = allowed_headers
+        self.exposed_headers = exposed_headers
+        self.max_age_in_seconds = max_age_in_seconds
+
+
+class CpkInfo(msrest.serialization.Model):
+    """Parameter group.
+
+    :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data
+     provided in the request. If not specified, encryption is performed with the root account
+     encryption key. For more information, see Encryption at Rest for Azure Storage Services.
+    :type encryption_key: str
+    :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided
+     if the x-ms-encryption-key header is provided.
+    :type encryption_key_sha256: str
+    :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently,
+     the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
+     provided. Possible values include: "None", "AES256".
+    :type encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType
+    """
+
+    _attribute_map = {
+        'encryption_key': {'key': 'encryptionKey', 'type': 'str'},
+        'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'},
+        'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        *,
+        encryption_key: Optional[str] = None,
+        encryption_key_sha256: Optional[str] = None,
+        encryption_algorithm: Optional[Union[str, "EncryptionAlgorithmType"]] = None,
+        **kwargs
+    ):
+        super(CpkInfo, self).__init__(**kwargs)
+        self.encryption_key = encryption_key
+        self.encryption_key_sha256 = encryption_key_sha256
+        self.encryption_algorithm = encryption_algorithm
+
+
+class CpkScopeInfo(msrest.serialization.Model):
+    """Parameter group.
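+
+    A minimal construction sketch (the scope name below is a hypothetical
+    placeholder; in practice the client layer fills in this parameter group
+    from keyword arguments rather than building it by hand):
+
+    .. code-block:: python
+
+        # Bundle an encryption-scope name for a request that supports CPK scopes.
+        cpk_scope = CpkScopeInfo(encryption_scope="my-encryption-scope")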
+ + :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the + encryption scope to use to encrypt the data provided in the request. If not specified, + encryption is performed with the default account encryption scope. For more information, see + Encryption at Rest for Azure Storage Services. + :type encryption_scope: str + """ + + _attribute_map = { + 'encryption_scope': {'key': 'encryptionScope', 'type': 'str'}, + } + + def __init__( + self, + *, + encryption_scope: Optional[str] = None, + **kwargs + ): + super(CpkScopeInfo, self).__init__(**kwargs) + self.encryption_scope = encryption_scope + + +class DataLakeStorageError(msrest.serialization.Model): + """DataLakeStorageError. + + :param data_lake_storage_error_details: The service error response object. + :type data_lake_storage_error_details: + ~azure.storage.blob.models.DataLakeStorageErrorAutoGenerated + """ + + _attribute_map = { + 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorAutoGenerated'}, + } + + def __init__( + self, + *, + data_lake_storage_error_details: Optional["DataLakeStorageErrorAutoGenerated"] = None, + **kwargs + ): + super(DataLakeStorageError, self).__init__(**kwargs) + self.data_lake_storage_error_details = data_lake_storage_error_details + + +class DataLakeStorageErrorAutoGenerated(msrest.serialization.Model): + """The service error response object. + + :param code: The service error code. + :type code: str + :param message: The service error message. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'Code', 'type': 'str'}, + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__( + self, + *, + code: Optional[str] = None, + message: Optional[str] = None, + **kwargs + ): + super(DataLakeStorageErrorAutoGenerated, self).__init__(**kwargs) + self.code = code + self.message = message + + +class DelimitedTextConfiguration(msrest.serialization.Model): + """delimited text configuration. + + All required parameters must be populated in order to send to Azure. + + :param column_separator: Required. column separator. + :type column_separator: str + :param field_quote: Required. field quote. + :type field_quote: str + :param record_separator: Required. record separator. + :type record_separator: str + :param escape_char: Required. escape char. + :type escape_char: str + :param headers_present: Required. has headers. 
+ :type headers_present: bool + """ + + _validation = { + 'column_separator': {'required': True}, + 'field_quote': {'required': True}, + 'record_separator': {'required': True}, + 'escape_char': {'required': True}, + 'headers_present': {'required': True}, + } + + _attribute_map = { + 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, + 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, + 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, + } + _xml_map = { + 'name': 'DelimitedTextConfiguration' + } + + def __init__( + self, + *, + column_separator: str, + field_quote: str, + record_separator: str, + escape_char: str, + headers_present: bool, + **kwargs + ): + super(DelimitedTextConfiguration, self).__init__(**kwargs) + self.column_separator = column_separator + self.field_quote = field_quote + self.record_separator = record_separator + self.escape_char = escape_char + self.headers_present = headers_present + + +class DirectoryHttpHeaders(msrest.serialization.Model): + """Parameter group. + + :param cache_control: Cache control for given resource. + :type cache_control: str + :param content_type: Content type for given resource. + :type content_type: str + :param content_encoding: Content encoding for given resource. + :type content_encoding: str + :param content_language: Content language for given resource. + :type content_language: str + :param content_disposition: Content disposition for given resource. + :type content_disposition: str + """ + + _attribute_map = { + 'cache_control': {'key': 'cacheControl', 'type': 'str'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, + 'content_language': {'key': 'contentLanguage', 'type': 'str'}, + 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, + } + + def __init__( + self, + *, + cache_control: Optional[str] = None, + content_type: Optional[str] = None, + content_encoding: Optional[str] = None, + content_language: Optional[str] = None, + content_disposition: Optional[str] = None, + **kwargs + ): + super(DirectoryHttpHeaders, self).__init__(**kwargs) + self.cache_control = cache_control + self.content_type = content_type + self.content_encoding = content_encoding + self.content_language = content_language + self.content_disposition = content_disposition + + +class FilterBlobItem(msrest.serialization.Model): + """Blob info from a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param container_name: Required. + :type container_name: str + :param tags: A set of tags. Blob tags. 
+ :type tags: ~azure.storage.blob.models.BlobTags + """ + + _validation = { + 'name': {'required': True}, + 'container_name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'container_name': {'key': 'ContainerName', 'type': 'str'}, + 'tags': {'key': 'Tags', 'type': 'BlobTags'}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__( + self, + *, + name: str, + container_name: str, + tags: Optional["BlobTags"] = None, + **kwargs + ): + super(FilterBlobItem, self).__init__(**kwargs) + self.name = name + self.container_name = container_name + self.tags = tags + + +class FilterBlobSegment(msrest.serialization.Model): + """The result of a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param where: Required. + :type where: str + :param blobs: Required. + :type blobs: list[~azure.storage.blob.models.FilterBlobItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'where': {'required': True}, + 'blobs': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'where': {'key': 'Where', 'type': 'str'}, + 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + service_endpoint: str, + where: str, + blobs: List["FilterBlobItem"], + next_marker: Optional[str] = None, + **kwargs + ): + super(FilterBlobSegment, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.where = where + self.blobs = blobs + self.next_marker = next_marker + + +class GeoReplication(msrest.serialization.Model): + """Geo-Replication information for the Secondary Storage Service. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the secondary location. Possible values include: "live", + "bootstrap", "unavailable". + :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType + :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes + preceding this value are guaranteed to be available for read operations at the secondary. + Primary writes after this point in time may or may not be available for reads. + :type last_sync_time: ~datetime.datetime + """ + + _validation = { + 'status': {'required': True}, + 'last_sync_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'Status', 'type': 'str'}, + 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, + } + + def __init__( + self, + *, + status: Union[str, "GeoReplicationStatusType"], + last_sync_time: datetime.datetime, + **kwargs + ): + super(GeoReplication, self).__init__(**kwargs) + self.status = status + self.last_sync_time = last_sync_time + + +class JsonTextConfiguration(msrest.serialization.Model): + """json text configuration. + + All required parameters must be populated in order to send to Azure. + + :param record_separator: Required. record separator. 
+ :type record_separator: str + """ + + _validation = { + 'record_separator': {'required': True}, + } + + _attribute_map = { + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + } + _xml_map = { + 'name': 'JsonTextConfiguration' + } + + def __init__( + self, + *, + record_separator: str, + **kwargs + ): + super(JsonTextConfiguration, self).__init__(**kwargs) + self.record_separator = record_separator + + +class KeyInfo(msrest.serialization.Model): + """Key information. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The date-time the key is active in ISO 8601 UTC time. + :type start: str + :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. + :type expiry: str + """ + + _validation = { + 'start': {'required': True}, + 'expiry': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str'}, + 'expiry': {'key': 'Expiry', 'type': 'str'}, + } + + def __init__( + self, + *, + start: str, + expiry: str, + **kwargs + ): + super(KeyInfo, self).__init__(**kwargs) + self.start = start + self.expiry = expiry + + +class LeaseAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param lease_id: If specified, the operation only succeeds if the resource's lease is active + and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': 'leaseId', 'type': 'str'}, + } + + def __init__( + self, + *, + lease_id: Optional[str] = None, + **kwargs + ): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = lease_id + + +class ListBlobsFlatSegmentResponse(msrest.serialization.Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param segment: Required. + :type segment: ~azure.storage.blob.models.BlobFlatListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + service_endpoint: str, + container_name: str, + segment: "BlobFlatListSegment", + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + next_marker: Optional[str] = None, + **kwargs + ): + super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.container_name = container_name + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.segment = segment + self.next_marker = next_marker + + +class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): + """An enumeration of blobs. 
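+
+    Instances of this model are normally produced by deserializing the
+    service's XML ``EnumerationResults`` response rather than built by hand.
+    A minimal construction sketch (all values hypothetical; ``segment`` is
+    assumed to be an already-built ``BlobHierarchyListSegment``):
+
+    .. code-block:: python
+
+        # Only service_endpoint, container_name, and segment are required.
+        response = ListBlobsHierarchySegmentResponse(
+            service_endpoint="https://myaccount.blob.core.windows.net/",
+            container_name="mycontainer",
+            segment=segment,
+            delimiter="/",
+        )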
+ + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param delimiter: + :type delimiter: str + :param segment: Required. + :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'delimiter': {'key': 'Delimiter', 'type': 'str'}, + 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + service_endpoint: str, + container_name: str, + segment: "BlobHierarchyListSegment", + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + delimiter: Optional[str] = None, + next_marker: Optional[str] = None, + **kwargs + ): + super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.container_name = container_name + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.delimiter = delimiter + self.segment = segment + self.next_marker = next_marker + + +class ListContainersSegmentResponse(msrest.serialization.Model): + """An enumeration of containers. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param container_items: Required. 
+ :type container_items: list[~azure.storage.blob.models.ContainerItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_items': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + service_endpoint: str, + container_items: List["ContainerItem"], + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + next_marker: Optional[str] = None, + **kwargs + ): + super(ListContainersSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.container_items = container_items + self.next_marker = next_marker + + +class Logging(msrest.serialization.Model): + """Azure Analytics Logging settings. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param delete: Required. Indicates whether all delete requests should be logged. + :type delete: bool + :param read: Required. Indicates whether all read requests should be logged. + :type read: bool + :param write: Required. Indicates whether all write requests should be logged. + :type write: bool + :param retention_policy: Required. the retention policy which determines how long the + associated data should persist. + :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'delete': {'required': True}, + 'read': {'required': True}, + 'write': {'required': True}, + 'retention_policy': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str'}, + 'delete': {'key': 'Delete', 'type': 'bool'}, + 'read': {'key': 'Read', 'type': 'bool'}, + 'write': {'key': 'Write', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + *, + version: str, + delete: bool, + read: bool, + write: bool, + retention_policy: "RetentionPolicy", + **kwargs + ): + super(Logging, self).__init__(**kwargs) + self.version = version + self.delete = delete + self.read = read + self.write = write + self.retention_policy = retention_policy + + +class Metrics(msrest.serialization.Model): + """a summary of request statistics grouped by API in hour or minute aggregates for blobs. + + All required parameters must be populated in order to send to Azure. + + :param version: The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the Blob service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary statistics for called + API operations. + :type include_apis: bool + :param retention_policy: the retention policy which determines how long the associated data + should persist. 
+ :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str'}, + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + *, + enabled: bool, + version: Optional[str] = None, + include_apis: Optional[bool] = None, + retention_policy: Optional["RetentionPolicy"] = None, + **kwargs + ): + super(Metrics, self).__init__(**kwargs) + self.version = version + self.enabled = enabled + self.include_apis = include_apis + self.retention_policy = retention_policy + + +class ModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param if_modified_since: Specify this header value to operate only on a blob if it has been + modified since the specified date/time. + :type if_modified_since: ~datetime.datetime + :param if_unmodified_since: Specify this header value to operate only on a blob if it has not + been modified since the specified date/time. + :type if_unmodified_since: ~datetime.datetime + :param if_match: Specify an ETag value to operate only on blobs with a matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. + :type if_none_match: str + :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a + matching value. + :type if_tags: str + """ + + _attribute_map = { + 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, + 'if_match': {'key': 'ifMatch', 'type': 'str'}, + 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, + 'if_tags': {'key': 'ifTags', 'type': 'str'}, + } + + def __init__( + self, + *, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + **kwargs + ): + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since + self.if_match = if_match + self.if_none_match = if_none_match + self.if_tags = if_tags + + +class PageList(msrest.serialization.Model): + """the list of pages. + + :param page_range: + :type page_range: list[~azure.storage.blob.models.PageRange] + :param clear_range: + :type clear_range: list[~azure.storage.blob.models.ClearRange] + """ + + _attribute_map = { + 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, + 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, + } + + def __init__( + self, + *, + page_range: Optional[List["PageRange"]] = None, + clear_range: Optional[List["ClearRange"]] = None, + **kwargs + ): + super(PageList, self).__init__(**kwargs) + self.page_range = page_range + self.clear_range = clear_range + + +class PageRange(msrest.serialization.Model): + """PageRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. 
+ :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'PageRange' + } + + def __init__( + self, + *, + start: int, + end: int, + **kwargs + ): + super(PageRange, self).__init__(**kwargs) + self.start = start + self.end = end + + +class QueryFormat(msrest.serialization.Model): + """QueryFormat. + + :param type: The quick query format type. Possible values include: "delimited", "json", + "arrow". + :type type: str or ~azure.storage.blob.models.QueryFormatType + :param delimited_text_configuration: delimited text configuration. + :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration + :param json_text_configuration: json text configuration. + :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration + :param arrow_configuration: arrow configuration. + :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration + """ + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, + 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, + 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, + 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, + } + + def __init__( + self, + *, + type: Optional[Union[str, "QueryFormatType"]] = None, + delimited_text_configuration: Optional["DelimitedTextConfiguration"] = None, + json_text_configuration: Optional["JsonTextConfiguration"] = None, + arrow_configuration: Optional["ArrowConfiguration"] = None, + **kwargs + ): + super(QueryFormat, self).__init__(**kwargs) + self.type = type + self.delimited_text_configuration = delimited_text_configuration + self.json_text_configuration = json_text_configuration + self.arrow_configuration = arrow_configuration + + +class QueryRequest(msrest.serialization.Model): + """the quick query body. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar query_type: Required. the query type. Default value: "SQL". + :vartype query_type: str + :param expression: Required. a query statement. 
+ :type expression: str + :param input_serialization: + :type input_serialization: ~azure.storage.blob.models.QuerySerialization + :param output_serialization: + :type output_serialization: ~azure.storage.blob.models.QuerySerialization + """ + + _validation = { + 'query_type': {'required': True, 'constant': True}, + 'expression': {'required': True}, + } + + _attribute_map = { + 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, + 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, + 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'}, + 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'}, + } + _xml_map = { + 'name': 'QueryRequest' + } + + query_type = "SQL" + + def __init__( + self, + *, + expression: str, + input_serialization: Optional["QuerySerialization"] = None, + output_serialization: Optional["QuerySerialization"] = None, + **kwargs + ): + super(QueryRequest, self).__init__(**kwargs) + self.expression = expression + self.input_serialization = input_serialization + self.output_serialization = output_serialization + + +class QuerySerialization(msrest.serialization.Model): + """QuerySerialization. + + All required parameters must be populated in order to send to Azure. + + :param format: Required. + :type format: ~azure.storage.blob.models.QueryFormat + """ + + _validation = { + 'format': {'required': True}, + } + + _attribute_map = { + 'format': {'key': 'Format', 'type': 'QueryFormat'}, + } + + def __init__( + self, + *, + format: "QueryFormat", + **kwargs + ): + super(QuerySerialization, self).__init__(**kwargs) + self.format = format + + +class RetentionPolicy(msrest.serialization.Model): + """the retention policy which determines how long the associated data should persist. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Indicates whether a retention policy is enabled for the storage + service. + :type enabled: bool + :param days: Indicates the number of days that metrics or logging or soft-deleted data should + be retained. All data older than this value will be deleted. + :type days: int + :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage + account. + :type allow_permanent_delete: bool + """ + + _validation = { + 'enabled': {'required': True}, + 'days': {'minimum': 1}, + } + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'days': {'key': 'Days', 'type': 'int'}, + 'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'}, + } + + def __init__( + self, + *, + enabled: bool, + days: Optional[int] = None, + allow_permanent_delete: Optional[bool] = None, + **kwargs + ): + super(RetentionPolicy, self).__init__(**kwargs) + self.enabled = enabled + self.days = days + self.allow_permanent_delete = allow_permanent_delete + + +class SequenceNumberAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a + blob if it has a sequence number less than or equal to the specified. + :type if_sequence_number_less_than_or_equal_to: long + :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it + has a sequence number less than the specified. 
+ :type if_sequence_number_less_than: long + :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it + has the specified sequence number. + :type if_sequence_number_equal_to: long + """ + + _attribute_map = { + 'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'}, + 'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'}, + 'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'}, + } + + def __init__( + self, + *, + if_sequence_number_less_than_or_equal_to: Optional[int] = None, + if_sequence_number_less_than: Optional[int] = None, + if_sequence_number_equal_to: Optional[int] = None, + **kwargs + ): + super(SequenceNumberAccessConditions, self).__init__(**kwargs) + self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to + self.if_sequence_number_less_than = if_sequence_number_less_than + self.if_sequence_number_equal_to = if_sequence_number_equal_to + + +class SignedIdentifier(msrest.serialization.Model): + """signed identifier. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. a unique id. + :type id: str + :param access_policy: An Access policy. + :type access_policy: ~azure.storage.blob.models.AccessPolicy + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, + } + _xml_map = { + 'name': 'SignedIdentifier' + } + + def __init__( + self, + *, + id: str, + access_policy: Optional["AccessPolicy"] = None, + **kwargs + ): + super(SignedIdentifier, self).__init__(**kwargs) + self.id = id + self.access_policy = access_policy + + +class SourceModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param source_if_modified_since: Specify this header value to operate only on a blob if it has + been modified since the specified date/time. + :type source_if_modified_since: ~datetime.datetime + :param source_if_unmodified_since: Specify this header value to operate only on a blob if it + has not been modified since the specified date/time. + :type source_if_unmodified_since: ~datetime.datetime + :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. + :type source_if_match: str + :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching + value. + :type source_if_none_match: str + :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a + matching value. 
+ :type source_if_tags: str + """ + + _attribute_map = { + 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, + 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, + 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, + 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, + 'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'}, + } + + def __init__( + self, + *, + source_if_modified_since: Optional[datetime.datetime] = None, + source_if_unmodified_since: Optional[datetime.datetime] = None, + source_if_match: Optional[str] = None, + source_if_none_match: Optional[str] = None, + source_if_tags: Optional[str] = None, + **kwargs + ): + super(SourceModifiedAccessConditions, self).__init__(**kwargs) + self.source_if_modified_since = source_if_modified_since + self.source_if_unmodified_since = source_if_unmodified_since + self.source_if_match = source_if_match + self.source_if_none_match = source_if_none_match + self.source_if_tags = source_if_tags + + +class StaticWebsite(msrest.serialization.Model): + """The properties that enable an account to host a static website. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Indicates whether this account is hosting a static website. + :type enabled: bool + :param index_document: The default name of the index page under each directory. + :type index_document: str + :param error_document404_path: The absolute path of the custom 404 page. + :type error_document404_path: str + :param default_index_document_path: Absolute path of the default index page. + :type default_index_document_path: str + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'index_document': {'key': 'IndexDocument', 'type': 'str'}, + 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, + 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, + } + + def __init__( + self, + *, + enabled: bool, + index_document: Optional[str] = None, + error_document404_path: Optional[str] = None, + default_index_document_path: Optional[str] = None, + **kwargs + ): + super(StaticWebsite, self).__init__(**kwargs) + self.enabled = enabled + self.index_document = index_document + self.error_document404_path = error_document404_path + self.default_index_document_path = default_index_document_path + + +class StorageError(msrest.serialization.Model): + """StorageError. + + :param message: + :type message: str + """ + + _attribute_map = { + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__( + self, + *, + message: Optional[str] = None, + **kwargs + ): + super(StorageError, self).__init__(**kwargs) + self.message = message + + +class StorageServiceProperties(msrest.serialization.Model): + """Storage Service Properties. + + :param logging: Azure Analytics Logging settings. + :type logging: ~azure.storage.blob.models.Logging + :param hour_metrics: a summary of request statistics grouped by API in hour or minute + aggregates for blobs. + :type hour_metrics: ~azure.storage.blob.models.Metrics + :param minute_metrics: a summary of request statistics grouped by API in hour or minute + aggregates for blobs. + :type minute_metrics: ~azure.storage.blob.models.Metrics + :param cors: The set of CORS rules. 
+ :type cors: list[~azure.storage.blob.models.CorsRule] + :param default_service_version: The default version to use for requests to the Blob service if + an incoming request's version is not specified. Possible values include version 2008-10-27 and + all more recent versions. + :type default_service_version: str + :param delete_retention_policy: the retention policy which determines how long the associated + data should persist. + :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy + :param static_website: The properties that enable an account to host a static website. + :type static_website: ~azure.storage.blob.models.StaticWebsite + """ + + _attribute_map = { + 'logging': {'key': 'Logging', 'type': 'Logging'}, + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, + 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, + 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, + 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, + } + + def __init__( + self, + *, + logging: Optional["Logging"] = None, + hour_metrics: Optional["Metrics"] = None, + minute_metrics: Optional["Metrics"] = None, + cors: Optional[List["CorsRule"]] = None, + default_service_version: Optional[str] = None, + delete_retention_policy: Optional["RetentionPolicy"] = None, + static_website: Optional["StaticWebsite"] = None, + **kwargs + ): + super(StorageServiceProperties, self).__init__(**kwargs) + self.logging = logging + self.hour_metrics = hour_metrics + self.minute_metrics = minute_metrics + self.cors = cors + self.default_service_version = default_service_version + self.delete_retention_policy = delete_retention_policy + self.static_website = static_website + + +class StorageServiceStats(msrest.serialization.Model): + """Stats for the storage service. + + :param geo_replication: Geo-Replication information for the Secondary Storage Service. + :type geo_replication: ~azure.storage.blob.models.GeoReplication + """ + + _attribute_map = { + 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, + } + + def __init__( + self, + *, + geo_replication: Optional["GeoReplication"] = None, + **kwargs + ): + super(StorageServiceStats, self).__init__(**kwargs) + self.geo_replication = geo_replication + + +class UserDelegationKey(msrest.serialization.Model): + """A user delegation key. + + All required parameters must be populated in order to send to Azure. + + :param signed_oid: Required. The Azure Active Directory object ID in GUID format. + :type signed_oid: str + :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. + :type signed_tid: str + :param signed_start: Required. The date-time the key is active. + :type signed_start: ~datetime.datetime + :param signed_expiry: Required. The date-time the key expires. + :type signed_expiry: ~datetime.datetime + :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the + key. + :type signed_service: str + :param signed_version: Required. The service version that created the key. + :type signed_version: str + :param value: Required. The key as a base64 string. 
+ :type value: str + """ + + _validation = { + 'signed_oid': {'required': True}, + 'signed_tid': {'required': True}, + 'signed_start': {'required': True}, + 'signed_expiry': {'required': True}, + 'signed_service': {'required': True}, + 'signed_version': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, + 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, + 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, + 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, + 'signed_service': {'key': 'SignedService', 'type': 'str'}, + 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'str'}, + } + + def __init__( + self, + *, + signed_oid: str, + signed_tid: str, + signed_start: datetime.datetime, + signed_expiry: datetime.datetime, + signed_service: str, + signed_version: str, + value: str, + **kwargs + ): + super(UserDelegationKey, self).__init__(**kwargs) + self.signed_oid = signed_oid + self.signed_tid = signed_tid + self.signed_start = signed_start + self.signed_expiry = signed_expiry + self.signed_service = signed_service + self.signed_version = signed_version + self.value = value diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/__init__.py new file mode 100644 index 0000000..62f85c9 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/__init__.py @@ -0,0 +1,25 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._container_operations import ContainerOperations +from ._directory_operations import DirectoryOperations +from ._blob_operations import BlobOperations +from ._page_blob_operations import PageBlobOperations +from ._append_blob_operations import AppendBlobOperations +from ._block_blob_operations import BlockBlobOperations + +__all__ = [ + 'ServiceOperations', + 'ContainerOperations', + 'DirectoryOperations', + 'BlobOperations', + 'PageBlobOperations', + 'AppendBlobOperations', + 'BlockBlobOperations', +] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_append_blob_operations.py new file mode 100644 index 0000000..0825fcf --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_append_blob_operations.py @@ -0,0 +1,708 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class AppendBlobOperations(object): + """AppendBlobOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create( + self, + content_length, # type: int + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + blob_tags_string=None, # type: Optional[str] + blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Create Append Blob operation creates a new append blob. + + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + blob_type = "AppendBlob" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + 
header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+        response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+        response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
+        response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    create.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    def append_block(
+        self,
+        content_length,  # type: int
+        body,  # type: IO
+        timeout=None,  # type: Optional[int]
+        transactional_content_md5=None,  # type: Optional[bytearray]
+        transactional_content_crc64=None,  # type: Optional[bytearray]
+        request_id_parameter=None,  # type: Optional[str]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        append_position_access_conditions=None,  # type: Optional["_models.AppendPositionAccessConditions"]
+        cpk_info=None,  # type: Optional["_models.CpkInfo"]
+        cpk_scope_info=None,  # type: Optional["_models.CpkScopeInfo"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Append Block operation commits a new block of data to the end of an existing append blob.
+        The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to
+        AppendBlob. Append Block is supported only in version 2015-02-21 and later.
+
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param body: Initial data.
+        :type body: IO
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service.
+        :type transactional_content_md5: bytearray
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service.
+        :type transactional_content_crc64: bytearray
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param append_position_access_conditions: Parameter group.
+        :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions
+        :param cpk_info: Parameter group.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _max_size = None + _append_position = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if append_position_access_conditions is not None: + _max_size = append_position_access_conditions.max_size + _append_position = append_position_access_conditions.append_position + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "appendblock" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.append_block.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = 
self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', 
response.headers.get('x-ms-encryption-scope'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    append_block.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    def append_block_from_url(
+        self,
+        source_url,  # type: str
+        content_length,  # type: int
+        source_range=None,  # type: Optional[str]
+        source_content_md5=None,  # type: Optional[bytearray]
+        source_contentcrc64=None,  # type: Optional[bytearray]
+        timeout=None,  # type: Optional[int]
+        transactional_content_md5=None,  # type: Optional[bytearray]
+        request_id_parameter=None,  # type: Optional[str]
+        cpk_info=None,  # type: Optional["_models.CpkInfo"]
+        cpk_scope_info=None,  # type: Optional["_models.CpkScopeInfo"]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        append_position_access_conditions=None,  # type: Optional["_models.AppendPositionAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        source_modified_access_conditions=None,  # type: Optional["_models.SourceModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Append Block operation commits a new block of data to the end of an existing append blob
+        where the contents are read from a source url. The Append Block operation is permitted only if
+        the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only in
+        version 2015-02-21 and later.
+
+        :param source_url: Specify a URL to the copy source.
+        :type source_url: str
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param source_range: Bytes of source data in the specified range.
+        :type source_range: str
+        :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+         from the copy source.
+        :type source_content_md5: bytearray
+        :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be
+         read from the copy source.
+        :type source_contentcrc64: bytearray
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service.
+        :type transactional_content_md5: bytearray
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param cpk_info: Parameter group.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param append_position_access_conditions: Parameter group.
+        :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions
+        :param modified_access_conditions: Parameter group.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group.
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _max_size = None + _append_position = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if append_position_access_conditions is not None: + _max_size = append_position_access_conditions.max_size + _append_position = append_position_access_conditions.append_position + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "appendblock" + accept = "application/xml" + + # Construct URL + url = self.append_block_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + 
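+        # The same guard pattern continues below: every optional argument (and
+        # each field unpacked from a parameter group) becomes a request header
+        # only when the caller actually supplied a value.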
if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
response.headers.get('ETag'))
+        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+        response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
+        response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+        response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset'))
+        response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count'))
+        response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
+        response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
+        response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    append_block_from_url.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    def seal(
+        self,
+        timeout=None,  # type: Optional[int]
+        request_id_parameter=None,  # type: Optional[str]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        append_position_access_conditions=None,  # type: Optional["_models.AppendPositionAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Seal operation seals the Append Blob to make it read-only. Seal is supported only in
+        version 2019-12-12 and later.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param append_position_access_conditions: Parameter group.
+ :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _append_position = None + if append_position_access_conditions is not None: + _append_position = append_position_access_conditions.append_position + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + comp = "seal" + accept = "application/xml" + + # Construct URL + url = self.seal.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + + if cls: + return cls(pipeline_response, None, response_headers) + + seal.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_blob_operations.py new file mode 100644 index 0000000..a72d4dc --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_blob_operations.py @@ -0,0 +1,3150 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class BlobOperations(object): + """BlobOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
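+
+    As orientation (the layering described here is the usual SDK convention
+    rather than something stated in this file): the public ``BlobClient``
+    methods build on this group, e.g. ``BlobClient.download_blob`` on
+    :func:`download` and ``BlobClient.get_blob_properties`` on
+    :func:`get_properties`.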
+ """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def download( + self, + snapshot=None, # type: Optional[str] + version_id=None, # type: Optional[str] + timeout=None, # type: Optional[int] + range=None, # type: Optional[str] + range_get_content_md5=None, # type: Optional[bool] + range_get_content_crc64=None, # type: Optional[bool] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> IO + """The Download operation reads or downloads a blob from the system, including its metadata and + properties. You can also call Download to read a snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param range_get_content_md5: When set to true and specified together with the Range, the + service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB + in size. + :type range_get_content_md5: bool + :param range_get_content_crc64: When set to true and specified together with the Range, the + service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 + MB in size. + :type range_get_content_crc64: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.download.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if range_get_content_crc64 is not None: + header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + 
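+        # ETag- and tag-based preconditions: If-Match / If-None-Match pin the
+        # download to a specific blob generation, x-ms-if-tags to a tag filter;
+        # a condition left unset simply emits no header.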
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + 
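+            # Lease state, request-tracking and versioning headers follow; each
+            # is typed by the same _deserialize pattern ('rfc-1123' -> datetime,
+            # 'long' -> int, 'bool' -> bool).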
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + 
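+            # The 206 (Partial Content) branch repeats the 200 mapping and adds
+            # the range-specific x-ms-content-crc64 header immediately below.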
response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def get_properties( + self, + snapshot=None, # type: Optional[str] + version_id=None, # type: Optional[str] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) 
-> None + """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = 
self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) + response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', 
response.headers.get('x-ms-copy-destination-snapshot')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) + response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) + response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) + response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', 
response.headers.get('x-ms-last-access-time')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def delete( + self, + snapshot=None, # type: Optional[str] + version_id=None, # type: Optional[str] + timeout=None, # type: Optional[int] + delete_snapshots=None, # type: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] + request_id_parameter=None, # type: Optional[str] + blob_delete_type="Permanent", # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """If the storage account's soft delete feature is disabled, then when a blob is deleted it is + permanently removed from the storage account. If the storage account's soft delete feature is + enabled, then when a blob is deleted it is marked for deletion and becomes inaccessible + immediately. However, the blob service retains the blob or snapshot for the number of days + specified by the DeleteRetentionPolicy section of [Storage service properties] (Set-Blob- + Service-Properties.md). After the specified number of days has passed, the blob's data is + permanently removed from the storage account. Note that you continue to be charged for the + soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify + the "include=deleted" query parameter to discover which blobs and snapshots have been soft + deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other + operations on a soft-deleted blob or snapshot cause the service to return an HTTP status code + of 404 (ResourceNotFound). + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the + following two options: include: Delete the base blob and all of its snapshots. only: Delete + only the blob's snapshots and not the blob itself. + :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to + permanently delete a blob if blob soft delete is enabled. + :type blob_delete_type: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group.
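# --- editor's usage sketch (illustrative; not part of the generated diff) ---
# get_properties above issues the HEAD request behind the public client's
# get_blob_properties(). A minimal sketch, assuming this vendored package
# mirrors azure-storage-blob's public surface (connection string and
# container/blob names are placeholders):
#
#     from azure.multiapi.storagev2.blob.v2020_06_12 import BlobClient
#     client = BlobClient.from_connection_string(conn_str, "mycontainer", "myblob")
#     props = client.get_blob_properties()
#     print(props.etag, props.last_modified)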
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if blob_delete_type is not None: + query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: 
+ map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def set_access_control( + self, + timeout=None, # type: Optional[int] + owner=None, # type: Optional[str] + group=None, # type: Optional[str] + posix_permissions=None, # type: Optional[str] + posix_acl=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Set the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and directories. The value is a + comma-separated list of access control entries. Each access control entry (ACE) consists of a + scope, a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
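# --- editor's usage sketch (illustrative; not part of the generated diff) ---
# The delete operation above maps to the public delete_blob(); the
# delete_snapshots value chooses between removing the base blob plus its
# snapshots ("include") or the snapshots only ("only"). A hedged sketch,
# reusing the placeholder `client` from the earlier sketch:
#
#     client.delete_blob(delete_snapshots="include")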
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "setAccessControl" + accept = "application/xml" + + # Construct URL + url = self.set_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def get_access_control( + self, + timeout=None, # type: Optional[int] + upn=None, # type: Optional[bool] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Get the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If + "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response + headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If + "false", the values will be returned as Azure Active Directory Object IDs. The default value is + false. + :type upn: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
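# --- editor's usage sketch (illustrative; not part of the generated diff) ---
# set_access_control above PATCHes ?action=setAccessControl and is only
# meaningful on hierarchical-namespace accounts. It is an internal,
# generated operation; `blob_ops` below is an assumed handle to an instance
# of this operations class (e.g. reached through a client's internal
# ._client attribute), not a documented public API:
#
#     blob_ops.set_access_control(
#         owner="$superuser",
#         posix_permissions="rwxr-x---",  # symbolic form; 4-digit octal such as "0750" also works
#     )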
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "getAccessControl" + accept = "application/xml" + + # Construct URL + url = self.get_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) + response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) + response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) + response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) + response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def rename( + self, + rename_source, # type: str + timeout=None, # type: Optional[int] + path_rename_mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] + directory_properties=None, # type: Optional[str] + posix_permissions=None, # type: Optional[str] + posix_umask=None, # type: Optional[str] + source_lease_id=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Rename a blob/file. By default, the destination is overwritten, and if the destination already + exists and has a lease, the lease is broken. This operation supports conditional HTTP requests. + For more information, see `Specifying Conditional Headers for Blob Service Operations + `_. To fail if the destination already exists, use a conditional + request with If-None-Match: "*". + + :param rename_source: The file or directory to be renamed. The value must have the following + format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties will + overwrite the existing properties; otherwise, the existing properties will be preserved. + :type rename_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param path_rename_mode: Determines the behavior of the rename operation. + :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode + :param directory_properties: Optional. User-defined properties to be stored with the file or + directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", + where each value is base64 encoded. + :type directory_properties: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask + restricts permission settings for the file or directory, and is applied only when a default ACL + does not exist in the parent directory.
If a bit is set in the umask, the corresponding + permission is disabled; otherwise, it is determined by the requested + permissions. A 4-digit octal notation (e.g. 0022) is supported here. If no umask is specified, + a default umask of 0027 is used. + :type posix_umask: str + :param source_lease_id: A lease ID for the source path. If specified, the source path must have + an active lease and the lease ID must match. + :type source_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param directory_http_headers: Parameter group. + :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cache_control = None + _content_type = None + _content_encoding = None + _content_language = None + _content_disposition = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if directory_http_headers is not None: + _cache_control = directory_http_headers.cache_control + _content_type = directory_http_headers.content_type + _content_encoding = directory_http_headers.content_encoding + _content_language = directory_http_headers.content_language + _content_disposition = directory_http_headers.content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + accept = "application/xml" + + # Construct URL + url = self.rename.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] +
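# --- editor's usage sketch (illustrative; not part of the generated diff) ---
# Counterpart read: get_access_control above HEADs ?action=getAccessControl
# and returns owner/group/permissions/ACL in response headers. Assuming the
# same hypothetical `blob_ops` handle as earlier, and using the `cls`
# callback (called as cls(pipeline_response, deserialized, response_headers),
# as shown in this diff) to surface the headers:
#
#     acl = blob_ops.get_access_control(upn=True, cls=lambda _, __, hdrs: hdrs)
#     print(acl["x-ms-acl"])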
if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, 
header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def undelete( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Undelete a blob that was previously soft deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
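# --- editor's usage sketch (illustrative; not part of the generated diff) ---
# rename above drives a PUT with x-ms-rename-source; rename_source must be
# "/{filesystem}/{path}". To fail rather than overwrite an existing
# destination, pass If-None-Match: "*" through ModifiedAccessConditions (the
# generated model referenced in the signature above; keyword name assumed):
#
#     blob_ops.rename(
#         rename_source="/myfilesystem/dir/old-name",
#         modified_access_conditions=ModifiedAccessConditions(if_none_match="*"),
#     )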
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "undelete" + accept = "application/xml" + + # Construct URL + url = self.undelete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + undelete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def set_expiry( + self, + expiry_options, # type: Union[str, "_models.BlobExpiryOptions"] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + expires_on=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Sets the time a blob will expire and be deleted. + + :param expiry_options: Required. Indicates mode of the expiry time. + :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param expires_on: The time to set the blob to expiry. 
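# --- editor's usage sketch (illustrative; not part of the generated diff) ---
# undelete above PUTs ?comp=undelete to restore a soft-deleted blob; the
# public equivalent is undelete_blob(). Minimal sketch with the placeholder
# `client` from the earlier sketches:
#
#     client.undelete_blob()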
+ :type expires_on: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "expiry" + accept = "application/xml" + + # Construct URL + url = self.set_expiry.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') + if expires_on is not None: + header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def set_http_headers( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Set HTTP Headers operation sets system properties on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. 
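# --- editor's usage sketch (illustrative; not part of the generated diff) ---
# set_expiry above PUTs ?comp=expiry. expiry_options selects the mode (e.g.
# "Absolute", "RelativeToNow"), and expires_on is a string: milliseconds for
# the relative modes, or an RFC 1123 date for "Absolute". A hedged sketch
# against the hypothetical `blob_ops` handle:
#
#     blob_ops.set_expiry("RelativeToNow", expires_on=str(8 * 60 * 60 * 1000))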
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_cache_control = None + _blob_content_type = None + _blob_content_md5 = None + _blob_content_encoding = None + _blob_content_language = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _blob_content_disposition = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_disposition = blob_http_headers.blob_content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.set_http_headers.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language 
is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def set_metadata( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or + more name-value pairs. + + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + 
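# --- editor's usage sketch (illustrative; not part of the generated diff) ---
# The preceding set_http_headers maps to the public set_http_headers(),
# which takes a ContentSettings value (both names are re-exported by this
# package's __init__, per the earlier hunk). A hedged sketch, assuming the
# azure-storage-blob-style public surface:
#
#     from azure.multiapi.storagev2.blob.v2020_06_12 import ContentSettings
#     client.set_http_headers(ContentSettings(content_type="application/json"))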
if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def acquire_lease( + self, + timeout=None, # type: 
Optional[int] + duration=None, # type: Optional[int] + proposed_lease_id=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "acquire" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 
'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def release_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "release" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + 
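+ # Unlike acquire/renew/change, a release returns no x-ms-lease-id header:
+ # the lease ceases to exist once released. A typical illustrative pairing,
+ # with a hypothetical `blob_ops` instance (sketch only):
+ #   blob_ops.acquire_lease(duration=-1, proposed_lease_id="<GUID>")
+ #   ... writes guarded by the lease ...
+ #   blob_ops.release_lease(lease_id="<GUID>")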
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def renew_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "renew" + accept = "application/xml" + + # Construct URL + url = self.renew_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def change_lease( + self, + lease_id, # type: str + proposed_lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "change" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
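+ # After a successful change, the x-ms-lease-id header deserialized below
+ # carries the proposed ID; the previous ID is no longer valid. Illustrative
+ # swap with hypothetical GUIDs (sketch only):
+ #   blob_ops.change_lease(lease_id="<old GUID>", proposed_lease_id="<new GUID>")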
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def break_lease( + self, + timeout=None, # type: Optional[int] + break_period=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a fixed- + duration lease breaks after the remaining lease period elapses, and an infinite lease breaks + immediately. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "break" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) + response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def create_snapshot( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Create Snapshot operation creates a read-only snapshot of a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "snapshot" + accept = "application/xml" + + # Construct URL + url = self.create_snapshot.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if 
_lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create_snapshot.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def start_copy_from_url( + self, + copy_source, # type: str + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] + rehydrate_priority=None, # type: Optional[Union[str, "_models.RehydratePriority"]] + request_id_parameter=None, # type: Optional[str] + blob_tags_string=None, # type: Optional[str] + seal_blob=None, # type: Optional[bool] + source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Start Copy From URL operation copies a blob or an internet resource to a new blob. + + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. 
+ If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived + blob. + :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param seal_blob: Overrides the sealed state of the destination blob. Service version + 2019-12-12 and newer. + :type seal_blob: bool + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + accept = "application/xml" + + # Construct URL + url = self.start_copy_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", 
self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _source_if_tags is not None: + header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if seal_blob is not None: + header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise 
HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def copy_from_url( + self, + copy_source, # type: str + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] + request_id_parameter=None, # type: Optional[str] + source_content_md5=None, # type: Optional[bytearray] + blob_tags_string=None, # type: Optional[str] + source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not + return a response until the copy is complete. + + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + x_ms_requires_sync = "true" + accept = "application/xml" + + # Construct URL + url = self.copy_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 
'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + + if cls: + return cls(pipeline_response, None, response_headers) + 
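+ # copy_from_url is the synchronous variant: x-ms-requires-sync is pinned to
+ # "true" above, so the service does not respond until the copy finishes and
+ # no polling of x-ms-copy-status is required. Contrast start_copy_from_url,
+ # whose 202 response must be polled and can be cancelled with
+ # abort_copy_from_url.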
+ copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def abort_copy_from_url( + self, + copy_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a + destination blob with zero length and full metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy + Blob operation. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "copy" + copy_action_abort_constant = "abort" + accept = "application/xml" + + # Construct URL + url = self.abort_copy_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + 
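+ # A successful abort yields 204 No Content, so only request-tracking headers
+ # are deserialized below. The copy_id must be the x-ms-copy-id value issued
+ # by the pending start_copy_from_url call. Illustrative (hypothetical ID,
+ # sketch only):
+ #   blob_ops.abort_copy_from_url(copy_id="<x-ms-copy-id of pending copy>")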
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def set_tier( + self, + tier, # type: Union[str, "_models.AccessTierRequired"] + snapshot=None, # type: Optional[str] + version_id=None, # type: Optional[str] + timeout=None, # type: Optional[int] + rehydrate_priority=None, # type: Optional[Union[str, "_models.RehydratePriority"]] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a + premium storage account and on a block blob in a blob storage account (locally redundant + storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not + update the blob's ETag. + + :param tier: Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierRequired + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived + blob. + :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "tier" + accept = "application/xml" + + # Construct URL + url = self.set_tier.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if response.status_code == 202: + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def get_account_info( + self, + **kwargs # type: Any + ): + # type: (...) -> None + """Returns the sku name and account kind. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "account" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_account_info.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) + response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def query( + self, + snapshot=None, # type: Optional[str] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + query_request=None, # type: Optional["_models.QueryRequest"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> IO + """The Query operation enables users to select/project on blob data by providing simple query + expressions. 
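+
+        Example (editor's illustration; ``ops`` is an assumed instance of this
+        operations class): ``ops.query(query_request=QueryRequest(
+        query_type="SQL", expression="SELECT * FROM BlobStorage"))`` returns
+        the filtered blob content as a download stream.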
+ + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param query_request: the query request. + :type query_request: ~azure.storage.blob.models.QueryRequest + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "query" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.query.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = 
self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if query_request is not None: + body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + 
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', 
response.headers.get('x-ms-blob-type')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def get_tags( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + snapshot=None, # type: Optional[str] + version_id=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> "_models.BlobTags" + """The Get Tags operation enables users to get the tags associated with a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BlobTags, or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlobTags + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "tags" + accept = "application/xml" + + # Construct URL + url = self.get_tags.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + 
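# Echo the remaining tracking headers, then deserialize the XML response
+        # body into a BlobTags model.
+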
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('BlobTags', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def set_tags( + self, + timeout=None, # type: Optional[int] + version_id=None, # type: Optional[str] + transactional_content_md5=None, # type: Optional[bytearray] + transactional_content_crc64=None, # type: Optional[bytearray] + request_id_parameter=None, # type: Optional[str] + tags=None, # type: Optional["_models.BlobTags"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Set Tags operation enables users to set tags on a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param tags: Blob tags. + :type tags: ~azure.storage.blob.models.BlobTags + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "tags" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_tags.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if tags is not None: + body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_block_blob_operations.py new file mode 100644 index 0000000..f9804ce --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_block_blob_operations.py @@ -0,0 +1,1098 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class BlockBlobOperations(object): + """BlockBlobOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def upload( + self, + content_length, # type: int + body, # type: IO + timeout=None, # type: Optional[int] + transactional_content_md5=None, # type: Optional[bytearray] + metadata=None, # type: Optional[str] + tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] + request_id_parameter=None, # type: Optional[str] + blob_tags_string=None, # type: Optional[str] + blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Upload Block Blob operation updates the content of an existing block blob. Updating an + existing block blob overwrites any existing metadata on the blob. 
Partial updates are not
+        supported with Put Blob; the content of the existing blob is overwritten with the content of
+        the new blob. To perform a partial update of the content of a block blob, use the Put Block
+        List operation.
+
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param body: Initial data.
+        :type body: IO
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service.
+        :type transactional_content_md5: bytearray
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the blob is created without metadata. If one or more
+         name-value pairs are specified, the blob is created with the specified metadata. Note that
+         beginning with version 2009-09-19, metadata names must adhere to the naming rules for C#
+         identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
+        :type metadata: str
+        :param tier: Optional. Indicates the tier to be set on the blob.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations.
+        :type blob_tags_string: str
+        :param blob_http_headers: Parameter group.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + blob_type = "BlockBlob" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.upload.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = 
self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
+        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+        response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+        response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+        response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
+        response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore
+
+    def put_blob_from_url(
+        self,
+        content_length, # type: int
+        copy_source, # type: str
+        timeout=None, # type: Optional[int]
+        transactional_content_md5=None, # type: Optional[bytearray]
+        metadata=None, # type: Optional[str]
+        tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]]
+        request_id_parameter=None, # type: Optional[str]
+        source_content_md5=None, # type: Optional[bytearray]
+        blob_tags_string=None, # type: Optional[str]
+        copy_source_blob_properties=None, # type: Optional[bool]
+        blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"]
+        lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+        cpk_info=None, # type: Optional["_models.CpkInfo"]
+        cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"]
+        modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+        source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"]
+        **kwargs # type: Any
+    ):
+        # type: (...) -> None
+        """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are
+        read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial
+        updates are not supported with Put Blob from URL; the content of an existing blob is
+        overwritten with the content of the new blob. To perform partial updates to a block blob's
+        contents using a source URL, use the Put Block from URL API in conjunction with Put Block List.
+
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param copy_source: Specifies the URL of the source blob. The value is a URL of up to 2 KB in
+         length that specifies a blob. The value should be URL-encoded as it would appear in a request
+         URI. The source blob must either be public or must be authorized via a shared access
+         signature.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service.
+        :type transactional_content_md5: bytearray
+        :param metadata: Optional.
Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param copy_source_blob_properties: Optional, default is true. Indicates if properties from + the source blob should be copied. + :type copy_source_blob_properties: bool + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. 
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + blob_type = "BlockBlob" + accept = "application/xml" + + # Construct URL + url = self.put_blob_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + 
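# Content-Length is always sent; the conditional blocks that follow map
+        # each optional parameter group onto its request header, emitting nothing
+        # when the option is absent.
+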
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _source_if_tags is not None: + header_parameters['x-ms-source-if-tags'] = 
self._serialize.header("source_if_tags", _source_if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if copy_source_blob_properties is not None: + header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def stage_block( + self, + block_id, # type: str + content_length, # type: int + body, # type: IO + transactional_content_md5=None, # type: Optional[bytearray] + transactional_content_crc64=None, # type: Optional[bytearray] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Stage Block operation creates a new block to be committed as part of a blob. + + :param block_id: A valid Base64 string value that identifies the block. 
Prior to encoding, the + string must be less than or equal to 64 bytes in size. For a given blob, the length of the + value specified for the blockid parameter must be the same size for each block. + :type block_id: str + :param content_length: The length of the request. + :type content_length: long + :param body: Initial data. + :type body: IO + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "block" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.stage_block.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 
'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def stage_block_from_url( + self, + block_id, # type: str + content_length, # type: int + source_url, # type: str + source_range=None, # type: Optional[str] + source_content_md5=None, # type: Optional[bytearray] + source_contentcrc64=None, # type: Optional[bytearray] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + 
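# The parameter groups below are flattened into individual x-ms-* request
+ # headers in the method body; leaving a group as None sends no conditional header.
+ 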
source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Stage Block operation creates a new block to be committed as part of a blob where the + contents are read from a URL. + + :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the + string must be less than or equal to 64 bytes in size. For a given blob, the length of the + value specified for the blockid parameter must be the same size for each block. + :type block_id: str + :param content_length: The length of the request. + :type content_length: long + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param source_modified_access_conditions: Parameter group. 
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "block" + accept = "application/xml" + + # Construct URL + url = self.stage_block_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", 
_encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def commit_block_list( + self, + blocks, # type: "_models.BlockLookupList" + timeout=None, # type: Optional[int] + transactional_content_md5=None, # type: Optional[bytearray] + transactional_content_crc64=None, # type: Optional[bytearray] + metadata=None, # type: Optional[str] + tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] + request_id_parameter=None, # type: Optional[str] + blob_tags_string=None, # type: Optional[str] + blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + 
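# Hedged sketch of the two-step block upload that commit_block_list completes
+ # (illustrative ids only; any equal-length Base64 strings work):
+ #   ops.stage_block("YmxvY2sx", len(chunk), chunk)
+ #   ops.commit_block_list(_models.BlockLookupList(latest=["YmxvY2sx"]))
+ 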
modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Commit Block List operation writes a blob by specifying the list of block IDs that make up + the blob. In order to be written as part of a blob, a block must have been successfully written + to the server in a prior Put Block operation. You can call Put Block List to update a blob by + uploading only those blocks that have changed, then committing the new and existing blocks + together. You can do this by specifying whether to commit a block from the committed block list + or from the uncommitted block list, or to commit the most recently uploaded version of the + block, whichever list it may belong to. + + :param blocks: + :type blocks: ~azure.storage.blob.models.BlockLookupList + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_cache_control = None + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "blocklist" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.commit_block_list.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = 
self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + 
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def get_block_list( + self, + snapshot=None, # type: Optional[str] + list_type="committed", # type: Union[str, "_models.BlockListType"] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> "_models.BlockList" + """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a + block blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param list_type: Specifies whether to return the list of committed blocks, the list of + uncommitted blocks, or both lists together. + :type list_type: str or ~azure.storage.blob.models.BlockListType + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BlockList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlockList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "blocklist" + accept = "application/xml" + + # Construct URL + url = self.get_block_list.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = 
self._deserialize('BlockList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_container_operations.py new file mode 100644 index 0000000..3219753 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_container_operations.py @@ -0,0 +1,1652 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class ContainerOperations(object): + """ContainerOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] + request_id_parameter=None, # type: Optional[str] + container_cpk_scope_info=None, # type: Optional["_models.ContainerCpkScopeInfo"] + **kwargs # type: Any + ): + # type: (...) -> None + """creates a new container under the specified account. If the container with the same name + already exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param access: Specifies whether data in the container may be accessed publicly and the level + of access. + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param container_cpk_scope_info: Parameter group. + :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _default_encryption_scope = None + _prevent_encryption_scope_override = None + if container_cpk_scope_info is not None: + _default_encryption_scope = container_cpk_scope_info.default_encryption_scope + _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override + restype = "container" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if access is not None: + header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _default_encryption_scope is not None: + header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str') + if _prevent_encryption_scope_override is not None: + header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, 
model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{containerName}'} # type: ignore + + def get_properties( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """returns all user-defined metadata and system properties for the specified container. The data + returned does not include the container's list of blobs. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "container" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) + response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) + response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) + response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) + response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{containerName}'} # type: ignore + + def delete( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """operation marks the specified container for deletion. The container and any blobs contained + within it are later deleted during garbage collection. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + restype = "container" + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{containerName}'} # type: ignore + + def set_metadata( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: 
Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """operation sets one or more user-defined name-value pairs for the specified container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + restype = "container" + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + 
+
+ def get_access_policy(
+ self,
+ timeout=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> List["_models.SignedIdentifier"]
+ """Gets the permissions for the specified container. The permissions indicate whether container
+ data may be accessed publicly.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of SignedIdentifier, or the result of cls(response) + :rtype: list[~azure.storage.blob.models.SignedIdentifier] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "container" + comp = "acl" + accept = "application/xml" + + # Construct URL + url = self.get_access_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore + + def set_access_policy( + self, + timeout=None, # type: Optional[int] + access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] + request_id_parameter=None, # type: 
Optional[str] + container_acl=None, # type: Optional[List["_models.SignedIdentifier"]] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """sets the permissions for the specified container. The permissions indicate whether blobs in a + container may be accessed publicly. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param access: Specifies whether data in the container may be accessed publicly and the level + of access. + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param container_acl: the acls for the container. + :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + restype = "container" + comp = "acl" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_access_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if access is not None: + header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 
'rfc-1123')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ body_content_kwargs = {} # type: Dict[str, Any]
+ serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}}
+ if container_acl is not None:
+ body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt)
+ else:
+ body_content = None
+ body_content_kwargs['content'] = body_content
+ request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore
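The generated ``set_access_policy`` above is surfaced publicly as ``set_container_access_policy``. A minimal sketch, with the same placeholder names as before; the dictionary keys become the ``SignedIdentifier`` ids serialized into the XML request body:

.. code-block:: python

    from datetime import datetime, timedelta

    from azure.multiapi.storagev2.blob.v2020_06_12 import (
        AccessPolicy,
        ContainerClient,
        ContainerSasPermissions,
    )

    conn_str = "<storage account connection string>"  # placeholder
    container = ContainerClient.from_connection_string(conn_str, "mycontainer")

    # One stored access policy, "readonly", granting read/list for a day.
    policy = AccessPolicy(
        permission=ContainerSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(days=1),
    )
    container.set_container_access_policy(
        signed_identifiers={"readonly": policy},
        public_access="container",
    )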
+
+ def restore(
+ self,
+ timeout=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ deleted_container_name=None, # type: Optional[str]
+ deleted_container_version=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Restores a previously-deleted container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of
+ the deleted container to restore.
+ :type deleted_container_name: str
+ :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the
+ version of the deleted container to restore.
+ :type deleted_container_version: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ restype = "container"
+ comp = "undelete"
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.restore.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ if deleted_container_name is not None:
+ header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str')
+ if deleted_container_version is not None:
+ header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ restore.metadata = {'url': '/{containerName}'} # type: ignore
+
+ def rename(
+ self,
+ source_container_name, # type: str
+ timeout=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ source_lease_id=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Renames an existing container.
+
+ :param source_container_name: Required. Specifies the name of the container to rename.
+ :type source_container_name: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param source_lease_id: A lease ID for the source path. If specified, the source path must have + an active lease and the lease ID must match. + :type source_lease_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "rename" + accept = "application/xml" + + # Construct URL + url = self.rename.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-source-container-name'] = self._serialize.header("source_container_name", source_container_name, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {'url': '/{containerName}'} # type: ignore + + def submit_batch( + self, + content_length, # type: int + multipart_content_type, # type: str + body, # type: IO + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) 
-> IO + """The Batch operation allows multiple API calls to be embedded into a single HTTP request. + + :param content_length: The length of the request. + :type content_length: long + :param multipart_content_type: Required. The value of this header must be multipart/mixed with + a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. + :type multipart_content_type: str + :param body: Initial data. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "batch" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.submit_batch.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(body, 'IO', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + submit_batch.metadata = {'url': '/{containerName}'} # type: ignore + + def acquire_lease( + self, + timeout=None, # type: Optional[int] + duration=None, # type: Optional[int] + proposed_lease_id=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "acquire" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore + + def release_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "release" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = 
self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {'url': '/{containerName}'} # type: ignore + + def renew_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "renew" + accept = "application/xml" + + # Construct URL + url = self.renew_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if 
cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {'url': '/{containerName}'} # type: ignore + + def break_lease( + self, + timeout=None, # type: Optional[int] + break_period=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a fixed- + duration lease breaks after the remaining lease period elapses, and an infinite lease breaks + immediately. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "break" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + 
if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {'url': '/{containerName}'} # type: ignore + + def change_lease( + self, + lease_id, # type: str + proposed_lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "change" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', 
response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ change_lease.metadata = {'url': '/{containerName}'} # type: ignore
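All five generated lease operations above (acquire, release, renew, break, change) are driven through a single ``BlobLeaseClient`` in the public API. A minimal sketch, with the same placeholder names:

.. code-block:: python

    from azure.multiapi.storagev2.blob.v2020_06_12 import ContainerClient

    conn_str = "<storage account connection string>"  # placeholder
    container = ContainerClient.from_connection_string(conn_str, "mycontainer")

    # Acquire a 15-second lease on the container; the returned BlobLeaseClient
    # maps its methods onto the acquire/renew/change/release/break operations.
    lease = container.acquire_lease(lease_duration=15)
    try:
        lease.renew()    # resets the lease clock
    finally:
        lease.release()  # or lease.break_lease() to let it lapse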
+
+ def list_blob_flat_segment(
+ self,
+ prefix=None, # type: Optional[str]
+ marker=None, # type: Optional[str]
+ maxresults=None, # type: Optional[int]
+ include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]]
+ timeout=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> "_models.ListBlobsFlatSegmentResponse"
+ """[Update] The List Blobs operation returns a list of the blobs under the specified container.
+
+ :param prefix: Filters the results to return only blobs whose names begin with the
+ specified prefix.
+ :type prefix: str
+ :param marker: A string value that identifies the portion of the list of blobs to be
+ returned with the next listing operation. The operation returns the NextMarker value within the
+ response body if the listing operation did not return all blobs remaining to be listed
+ with the current page. The NextMarker value can be used as the value for the marker parameter
+ in a subsequent call to request the next page of list items. The marker value is opaque to the
+ client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of blobs to return. If the request does
+ not specify maxresults, or specifies a value greater than 5000, the server will return up to
+ 5000 items. Note that if the listing operation crosses a partition boundary, then the service
+ will return a continuation token for retrieving the remainder of the results. For this reason,
+ it is possible that the service will return fewer results than specified by maxresults, or than
+ the default of 5000.
+ :type maxresults: int
+ :param include: Include this parameter to specify one or more datasets to include in the
+ response.
+ :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem]
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: ListBlobsFlatSegmentResponse, or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ restype = "container"
+ comp = "list"
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.list_blob_flat_segment.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore
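In the public API the flat and hierarchy segment operations back ``list_blobs`` and ``walk_blobs``, which handle the ``marker``/``maxresults`` paging internally. A minimal sketch, with the same placeholder names and a hypothetical ``logs/`` prefix:

.. code-block:: python

    from azure.multiapi.storagev2.blob.v2020_06_12 import ContainerClient

    conn_str = "<storage account connection string>"  # placeholder
    container = ContainerClient.from_connection_string(conn_str, "mycontainer")

    # Flat listing: every blob whose name starts with the prefix.
    for blob in container.list_blobs(name_starts_with="logs/"):
        print(blob.name, blob.size)

    # Hierarchy listing: one "directory" level at a time, delimited by "/";
    # items are BlobProperties or BlobPrefix placeholders.
    for item in container.walk_blobs(name_starts_with="logs/", delimiter="/"):
        print(item.name)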
+
+ def list_blob_hierarchy_segment(
+ self,
+ delimiter, # type: str
+ prefix=None, # type: Optional[str]
+ marker=None, # type: Optional[str]
+ maxresults=None, # type: Optional[int]
+ include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]]
+ timeout=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> "_models.ListBlobsHierarchySegmentResponse"
+ """[Update] The List Blobs operation returns a list of the blobs under the specified container.
+
+ :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+ element in the response body that acts as a placeholder for all blobs whose names begin with
+ the same substring up to the appearance of the delimiter character. The delimiter may be a
+ single character or a string.
+ :type delimiter: str
+ :param prefix: Filters the results to return only blobs whose names begin with the
+ specified prefix.
+ :type prefix: str
+ :param marker: A string value that identifies the portion of the list of blobs to be
+ returned with the next listing operation. The operation returns the NextMarker value within the
+ response body if the listing operation did not return all blobs remaining to be listed
+ with the current page. The NextMarker value can be used as the value for the marker parameter
+ in a subsequent call to request the next page of list items. The marker value is opaque to the
+ client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of blobs to return. If the request does
+ not specify maxresults, or specifies a value greater than 5000, the server will return up to
+ 5000 items. Note that if the listing operation crosses a partition boundary, then the service
+ will return a continuation token for retrieving the remainder of the results. For this reason,
+ it is possible that the service will return fewer results than specified by maxresults, or than
+ the default of 5000.
+ :type maxresults: int
+ :param include: Include this parameter to specify one or more datasets to include in the
+ response.
+ :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem]
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore + + def get_account_info( + self, + **kwargs # type: Any + ): 
+ # type: (...) -> None + """Returns the sku name and account kind. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "account" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_account_info.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) + response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_directory_operations.py new file mode 100644 index 0000000..f025757 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_directory_operations.py @@ -0,0 +1,748 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# --------------------------------------------------------------------------
+import datetime
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+
+from .. import models as _models
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
+
+    T = TypeVar('T')
+    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class DirectoryOperations(object):
+    """DirectoryOperations operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~azure.storage.blob.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = _models
+
+    def __init__(self, client, config, serializer, deserializer):
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def create(
+        self,
+        timeout=None,  # type: Optional[int]
+        directory_properties=None,  # type: Optional[str]
+        posix_permissions=None,  # type: Optional[str]
+        posix_umask=None,  # type: Optional[str]
+        request_id_parameter=None,  # type: Optional[str]
+        directory_http_headers=None,  # type: Optional["_models.DirectoryHttpHeaders"]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """Create a directory. By default, the destination is overwritten and, if the destination
+        already exists and has a lease, the lease is broken. This operation supports conditional HTTP
+        requests. For more information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+        To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param directory_properties: Optional. User-defined properties to be stored with the file or
+         directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...",
+         where each value is base64 encoded.
+        :type directory_properties: str
+        :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission. The sticky bit is also
+         supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+        :type posix_permissions: str
+        :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask
+         restricts permission settings for file and directory, and will only be applied when a default
+         ACL does not exist in the parent directory.
If a bit is set in the umask, the corresponding
+         permission is disabled; otherwise, the corresponding permission is taken from the
+         permissions value. A 4-digit octal notation (e.g. 0022) is supported here. If no umask is
+         specified, a default umask of 0027 is used.
+        :type posix_umask: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param directory_http_headers: Parameter group.
+        :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None, or the result of cls(response)
+        :rtype: None
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+
+        _cache_control = None
+        _content_type = None
+        _content_encoding = None
+        _content_language = None
+        _content_disposition = None
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        if directory_http_headers is not None:
+            _cache_control = directory_http_headers.cache_control
+            _content_type = directory_http_headers.content_type
+            _content_encoding = directory_http_headers.content_encoding
+            _content_language = directory_http_headers.content_language
+            _content_disposition = directory_http_headers.content_disposition
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+            _if_match = modified_access_conditions.if_match
+            _if_none_match = modified_access_conditions.if_none_match
+        resource = "directory"
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.create.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['resource'] = self._serialize.query("resource", resource, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        if directory_properties is not None:
+            header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str')
+        if posix_permissions is not None:
+            header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str')
+        if posix_umask is not None:
+            header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str')
+        if _cache_control is not None:
+            header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control",
_cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def rename( + self, + rename_source, # type: str + timeout=None, # type: Optional[int] + marker=None, # type: Optional[str] + path_rename_mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] + directory_properties=None, # type: Optional[str] + posix_permissions=None, # type: Optional[str] + posix_umask=None, # type: Optional[str] + source_lease_id=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] + lease_access_conditions=None, # type: 
Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Rename a directory. By default, the destination is overwritten and if the destination already + exists and has a lease the lease is broken. This operation supports conditional HTTP requests. + For more information, see `Specifying Conditional Headers for Blob Service Operations + `_. To fail if the destination already exists, use a conditional + request with If-None-Match: "*". + + :param rename_source: The file or directory to be renamed. The value must have the following + format: "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will + overwrite the existing properties; otherwise, the existing properties will be preserved. + :type rename_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param marker: When renaming a directory, the number of paths that are renamed with each + invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation + token is returned in this response header. When a continuation token is returned in the + response, it must be specified in a subsequent invocation of the rename operation to continue + renaming the directory. + :type marker: str + :param path_rename_mode: Determines the behavior of the rename operation. + :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode + :param directory_properties: Optional. User-defined properties to be stored with the file or + directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", + where each value is base64 encoded. + :type directory_properties: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask + restricts permission settings for file and directory, and will only be applied when default Acl + does not exist in parent directory. If the umask bit has set, it means that the corresponding + permission will be disabled. Otherwise the corresponding permission will be determined by the + permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, + a default umask - 0027 will be used. + :type posix_umask: str + :param source_lease_id: A lease ID for the source path. If specified, the source path must have + an active lease and the lease ID must match. + :type source_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param directory_http_headers: Parameter group. + :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cache_control = None + _content_type = None + _content_encoding = None + _content_language = None + _content_disposition = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if directory_http_headers is not None: + _cache_control = directory_http_headers.cache_control + _content_type = directory_http_headers.content_type + _content_encoding = directory_http_headers.content_encoding + _content_language = directory_http_headers.content_language + _content_disposition = directory_http_headers.content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + accept = "application/xml" + + # Construct URL + url = self.rename.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + 
header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    rename.metadata = {'url': '/{filesystem}/{path}'}  # type: ignore
+
+    def delete(
+        self,
+        recursive_directory_delete,  # type: bool
+        timeout=None,  # type: Optional[int]
+        marker=None,  # type: Optional[str]
+        request_id_parameter=None,  # type: Optional[str]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """Deletes the directory.
+
+        :param recursive_directory_delete: If "true", all paths beneath the directory will be deleted.
+         If "false" and the directory is non-empty, an error occurs.
+        :type recursive_directory_delete: bool
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param marker: When deleting a directory, the number of paths that are deleted with each
+         invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation
+         token is returned in this response header. When a continuation token is returned in the
+         response, it must be specified in a subsequent invocation of the delete operation to continue
+         deleting the directory.
+        :type marker: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group.
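+
+        An illustrative sketch of the equivalent high-level call (hedged: the
+        filedatalake module path is an assumption based on this package's layout)::
+
+            from azure.multiapi.storagev2.filedatalake.v2020_06_12 import (
+                DataLakeServiceClient,
+            )
+
+            service = DataLakeServiceClient(
+                account_url="https://myaccount.dfs.core.windows.net",
+                credential="<account-key>",
+            )
+            directory = service.get_file_system_client("myfs").get_directory_client("olddata")
+            # delete_directory wraps this operation; very large directories may
+            # require several invocations, driven by the continuation marker above.
+            directory.delete_directory()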
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def set_access_control( + self, + timeout=None, # type: Optional[int] + owner=None, # type: Optional[str] + group=None, # type: Optional[str] + posix_permissions=None, # type: Optional[str] + posix_acl=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Set the owner, group, permissions, or access control list for a directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and directories. The value is a + comma-separated list of access control entries. Each access control entry (ACE) consists of a + scope, a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
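+
+        For illustration (hedged: the POSIX ACL surface is usually reached through
+        the filedatalake ``DataLakeDirectoryClient``; the module path is an
+        assumption)::
+
+            from azure.multiapi.storagev2.filedatalake.v2020_06_12 import (
+                DataLakeServiceClient,
+            )
+
+            service = DataLakeServiceClient(
+                account_url="https://myaccount.dfs.core.windows.net",
+                credential="<account-key>",
+            )
+            directory = service.get_file_system_client("myfs").get_directory_client("data")
+            # Either the short permissions form or a full ACL string may be set,
+            # mirroring posix_permissions and posix_acl on this operation.
+            directory.set_access_control(permissions="rwxr-x---")
+            directory.set_access_control(acl="user::rwx,group::r-x,other::---")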
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "setAccessControl" + accept = "application/xml" + + # Construct URL + url = self.set_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def get_access_control( + self, + timeout=None, # type: Optional[int] + upn=None, # type: Optional[bool] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Get the owner, group, permissions, or access control list for a directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If + "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response + headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If + "false", the values will be returned as Azure Active Directory Object IDs. The default value is + false. + :type upn: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
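+
+        For illustration (hedged sketch; the filedatalake module path is an
+        assumption based on this package's layout)::
+
+            from azure.multiapi.storagev2.filedatalake.v2020_06_12 import (
+                DataLakeServiceClient,
+            )
+
+            service = DataLakeServiceClient(
+                account_url="https://myaccount.dfs.core.windows.net",
+                credential="<account-key>",
+            )
+            directory = service.get_file_system_client("myfs").get_directory_client("data")
+            # upn=True resolves Azure Active Directory Object IDs to User Principal Names.
+            access = directory.get_access_control(upn=True)
+            print(access["owner"], access["permissions"], access["acl"])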
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "getAccessControl" + accept = "application/xml" + + # Construct URL + url = self.get_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) + response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) + response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) + response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) + response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_page_blob_operations.py new file mode 100644 index 0000000..ea4b17c --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_page_blob_operations.py @@ -0,0 +1,1406 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class PageBlobOperations(object): + """PageBlobOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
+ """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create( + self, + content_length, # type: int + blob_content_length, # type: int + timeout=None, # type: Optional[int] + tier=None, # type: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] + metadata=None, # type: Optional[str] + blob_sequence_number=0, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + blob_tags_string=None, # type: Optional[str] + blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Create operation creates a new page blob. + + :param content_length: The length of the request. + :type content_length: long + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. + :type blob_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param tier: Optional. Indicates the tier to be set on the page blob. + :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled + value that you can use to track requests. The value of the sequence number must be between 0 + and 2^63 - 1. + :type blob_sequence_number: long + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + blob_type = "PageBlob" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = 
self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+        response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+        response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
+        response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    create.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    def upload_pages(
+        self,
+        content_length,  # type: int
+        body,  # type: IO
+        transactional_content_md5=None,  # type: Optional[bytearray]
+        transactional_content_crc64=None,  # type: Optional[bytearray]
+        timeout=None,  # type: Optional[int]
+        range=None,  # type: Optional[str]
+        request_id_parameter=None,  # type: Optional[str]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        cpk_info=None,  # type: Optional["_models.CpkInfo"]
+        cpk_scope_info=None,  # type: Optional["_models.CpkScopeInfo"]
+        sequence_number_access_conditions=None,  # type: Optional["_models.SequenceNumberAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Upload Pages operation writes a range of pages to a page blob.
+
+        :param content_length: The length of the request.
+        :type content_length: long
+        :param body: Initial data.
+        :type body: IO
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service.
+        :type transactional_content_md5: bytearray
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service.
+        :type transactional_content_crc64: bytearray
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param range: Specifies the range of bytes to be written as a page; the start and end of the
+         range must be aligned to 512-byte boundaries.
+        :type range: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param cpk_info: Parameter group.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param sequence_number_access_conditions: Parameter group.
+        :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions
+        :param modified_access_conditions: Parameter group.
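+
+        A brief usage sketch via the public ``BlobClient`` from this package
+        (names are illustrative)::
+
+            from azure.multiapi.storagev2.blob.v2020_06_12 import BlobClient
+
+            blob = BlobClient(
+                account_url="https://myaccount.blob.core.windows.net",
+                container_name="mycontainer",
+                blob_name="disk.vhd",
+                credential="<account-key>",
+            )
+            blob.create_page_blob(size=4096)
+            # offset and length must be 512-byte aligned; they are folded into the
+            # x-ms-range header that this operation constructs.
+            blob.upload_page(page=b"\x00" * 512, offset=0, length=512)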
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + comp = "page" + page_write = "update" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.upload_pages.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + 
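+            # Customer-provided key (CPK): when the caller supplies CpkInfo, the key,
+            # its SHA-256 hash, and the encryption algorithm are each forwarded as
+            # x-ms-encryption-* request headers in the conditionals below.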
header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def clear_pages( + self, + content_length, # type: int + timeout=None, # type: Optional[int] + range=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Clear Pages operation clears a set of pages from a page blob. + + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Parameter group. + :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + comp = "page" + page_write = "clear" + accept = "application/xml" + + # Construct URL + url = self.clear_pages.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if 
_encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def upload_pages_from_url( + self, + source_url, # type: str + source_range, # type: str + content_length, # type: int + range, # type: str + source_content_md5=None, # type: 
Optional[bytearray] + source_contentcrc64=None, # type: Optional[bytearray] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Upload Pages operation writes a range of pages to a page blob where the contents are read + from a URL. + + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param source_range: Bytes of source data in the specified range. The length of this range + should match the ContentLength header and x-ms-range/Range destination range header. + :type source_range: str + :param content_length: The length of the request. + :type content_length: long + :param range: The range of bytes to which the source range would be written. The range should + be 512 aligned and range-end is required. + :type range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param sequence_number_access_conditions: Parameter group. + :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. 
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "page" + page_write = "update" + accept = "application/xml" + + # Construct URL + url = self.upload_pages_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = 
self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + 
request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def get_page_ranges( + self, + snapshot=None, # type: Optional[str] + timeout=None, # type: Optional[int] + range=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> "_models.PageList" + """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot + of a page blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating a Snapshot of + a Blob.`. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "pagelist" + accept = "application/xml" + + # Construct URL + url = self.get_page_ranges.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + 
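+            # Non-200 responses carry an XML StorageError body; it is parsed here so the
+            # raised HttpResponseError exposes the storage error code, not just the status.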
raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+        response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length'))
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+        deserialized = self._deserialize('PageList', pipeline_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)
+
+        return deserialized
+    get_page_ranges.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    def get_page_ranges_diff(
+        self,
+        snapshot=None,  # type: Optional[str]
+        timeout=None,  # type: Optional[int]
+        prevsnapshot=None,  # type: Optional[str]
+        prev_snapshot_url=None,  # type: Optional[str]
+        range=None,  # type: Optional[str]
+        request_id_parameter=None,  # type: Optional[str]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> "_models.PageList"
+        """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob
+        that were changed between the target blob and a previous snapshot.
+
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see :code:`Creating a Snapshot of a Blob.`.
+        :type snapshot: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a
+         DateTime value that specifies that the response will contain only pages that were changed
+         between the target blob and a previous snapshot. Changed pages include both updated and
+         cleared pages. The target blob may be a snapshot, as long as the snapshot specified by
+         prevsnapshot is the older of the two. Note that incremental snapshots are currently supported
+         only for blobs created on or after January 1, 2016.
+        :type prevsnapshot: str
+        :param prev_snapshot_url: Optional. This header is only supported in service versions
+         2019-04-19 and later and specifies the URL of a previous snapshot of the target blob. The
+         response will only contain pages that were changed between the target blob and its previous
+         snapshot.
+        :type prev_snapshot_url: str
+        :param range: Return only the bytes of the blob in the specified range.
+        :type range: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "pagelist" + accept = "application/xml" + + # Construct URL + url = self.get_page_ranges_diff.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if prevsnapshot is not None: + query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if prev_snapshot_url is not None: + header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('PageList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def resize( + self, + blob_content_length, # type: int + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Resize the Blob. + + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. + :type blob_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.resize.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') 
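+        # Beyond the generic lease, CPK, and conditional headers above, resize needs just
+        # one operation-specific header: x-ms-blob-content-length (set below), the new
+        # total size of the page blob, which must be 512-byte aligned.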
+ header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + resize.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def update_sequence_number( + self, + sequence_number_action, # type: Union[str, "_models.SequenceNumberActionType"] + timeout=None, # type: Optional[int] + blob_sequence_number=0, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Update the sequence number of the blob. + + :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the + request. This property applies to page blobs only. This property indicates how the service + should modify the blob's sequence number. + :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled + value that you can use to track requests. The value of the sequence number must be between 0 + and 2^63 - 1. + :type blob_sequence_number: long + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.update_sequence_number.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+        response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number'))
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    update_sequence_number.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    def copy_incremental(
+        self,
+        copy_source,  # type: str
+        timeout=None,  # type: Optional[int]
+        request_id_parameter=None,  # type: Optional[str]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Copy Incremental operation copies a snapshot of the source page blob to a destination
+        page blob. The snapshot is copied such that only the differential changes between the
+        previously copied snapshot and the snapshot being copied are transferred to the destination.
+        The copied snapshots are complete copies of the original snapshot and can be read or copied
+        from as usual. This API is supported since REST version 2016-05-31.
+
+        :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL
+         of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded
+         as it would appear in a request URI. The source blob must either be public or must be
+         authenticated via a shared access signature.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "incrementalcopy" + accept = "application/xml" + + # Construct URL + url = self.copy_incremental.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + 
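+        # A 202 means the incremental copy was accepted, not finished; x-ms-copy-id and
+        # x-ms-copy-status (deserialized below) identify and track the asynchronous copy.
+        # Editorial sketch, assuming a destination BlobClient `dest` from the convenience
+        # layer: callers typically poll copy state rather than read these raw headers, e.g.
+        #     props = dest.get_blob_properties()
+        #     while props.copy.status == 'pending':
+        #         time.sleep(1)
+        #         props = dest.get_blob_properties()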
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_service_operations.py new file mode 100644 index 0000000..72f7a73 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_generated/operations/_service_operations.py @@ -0,0 +1,703 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class ServiceOperations(object): + """ServiceOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def set_properties( + self, + storage_service_properties, # type: "_models.StorageServiceProperties" + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Sets properties for a storage account's Blob service endpoint, including properties for Storage + Analytics and CORS (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. + :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ restype = "service"
+ comp = "properties"
+ content_type = kwargs.pop("content_type", "application/xml")
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.set_properties.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ body_content_kwargs = {} # type: Dict[str, Any]
+ body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True)
+ body_content_kwargs['content'] = body_content
+ request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ set_properties.metadata = {'url': '/'} # type: ignore
+
+ def get_properties(
+ self,
+ timeout=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> "_models.StorageServiceProperties"
+ """Gets the properties of a storage account's Blob service, including properties for Storage
+ Analytics and CORS (Cross-Origin Resource Sharing) rules.
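+
+ Illustrative sketch (not part of the generated surface): callers normally
+ reach this operation through ``BlobServiceClient.get_service_properties``.
+ The account URL and key below are placeholders.
+
+ .. code-block:: python
+
+     from azure.multiapi.storagev2.blob.v2020_06_12 import BlobServiceClient
+
+     # Placeholder endpoint and credential.
+     service = BlobServiceClient("https://<account>.blob.core.windows.net",
+                                 credential="<account-key>")
+     props = service.get_service_properties()  # wraps this operation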
+ + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceProperties, or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceProperties + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('StorageServiceProperties', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_properties.metadata = {'url': '/'} # type: ignore + + def get_statistics( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "_models.StorageServiceStats" + """Retrieves statistics related to replication for the Blob service. It is only available on the + secondary location endpoint when read-access geo-redundant replication is enabled for the + storage account. + + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceStats, or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceStats + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "stats" + accept = "application/xml" + + # Construct URL + url = self.get_statistics.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('StorageServiceStats', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_statistics.metadata = {'url': '/'} # type: ignore + + def list_containers_segment( + self, + prefix=None, # type: Optional[str] + marker=None, # type: Optional[str] + maxresults=None, # type: Optional[int] + include=None, # type: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) 
-> "_models.ListContainersSegmentResponse" + """The List Containers Segment operation returns a list of the containers under the specified + account. + + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. + :type maxresults: int + :param include: Include this parameter to specify that the container's metadata be returned as + part of the response body. + :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListContainersSegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_containers_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_containers_segment.metadata = {'url': '/'} # type: ignore + + def get_user_delegation_key( + self, + key_info, # type: "_models.KeyInfo" + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "_models.UserDelegationKey" + """Retrieves a user delegation key for the Blob service. This is only a valid operation when using + bearer token authentication. 
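+
+ Hedged sketch: the public ``BlobServiceClient.get_user_delegation_key``
+ wraps this operation and requires an AAD token credential such as
+ ``azure.identity.DefaultAzureCredential``; the account URL is a placeholder.
+
+ .. code-block:: python
+
+     from datetime import datetime, timedelta
+
+     from azure.identity import DefaultAzureCredential
+     from azure.multiapi.storagev2.blob.v2020_06_12 import BlobServiceClient
+
+     service = BlobServiceClient("https://<account>.blob.core.windows.net",
+                                 credential=DefaultAzureCredential())
+     # Key valid for one hour; start and expiry are UTC datetimes.
+     delegation_key = service.get_user_delegation_key(
+         datetime.utcnow(), datetime.utcnow() + timedelta(hours=1))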
+ + :param key_info: + :type key_info: ~azure.storage.blob.models.KeyInfo + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UserDelegationKey, or the result of cls(response) + :rtype: ~azure.storage.blob.models.UserDelegationKey + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "userdelegationkey" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.get_user_delegation_key.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('UserDelegationKey', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_user_delegation_key.metadata = {'url': '/'} # type: 
ignore + + def get_account_info( + self, + **kwargs # type: Any + ): + # type: (...) -> None + """Returns the sku name and account kind. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "account" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_account_info.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) + response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) + response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {'url': '/'} # type: ignore + + def submit_batch( + self, + content_length, # type: int + multipart_content_type, # type: str + body, # type: IO + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> IO + """The Batch operation allows multiple API calls to be embedded into a single HTTP request. + + :param content_length: The length of the request. + :type content_length: long + :param multipart_content_type: Required. The value of this header must be multipart/mixed with + a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. + :type multipart_content_type: str + :param body: Initial data. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "batch" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.submit_batch.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(body, 'IO', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + submit_batch.metadata = {'url': '/'} # type: ignore + + def filter_blobs( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + where=None, # type: Optional[str] + marker=None, # type: Optional[str] + maxresults=None, # type: Optional[int] + **kwargs # type: 
Any
+ ):
+ # type: (...) -> "_models.FilterBlobSegment"
+ """The Filter Blobs operation enables callers to list blobs across all containers whose tags match
+ a given search expression. Filter blobs searches across all containers within a storage
+ account but can be scoped within the expression to a single container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param where: Filters the results to return only blobs whose tags match the
+ specified expression.
+ :type where: str
+ :param marker: A string value that identifies the portion of the list of containers to be
+ returned with the next listing operation. The operation returns the NextMarker value within the
+ response body if the listing operation did not return all containers remaining to be listed
+ with the current page. The NextMarker value can be used as the value for the marker parameter
+ in a subsequent call to request the next page of list items. The marker value is opaque to the
+ client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of containers to return. If the request does
+ not specify maxresults, or specifies a value greater than 5000, the server will return up to
+ 5000 items. Note that if the listing operation crosses a partition boundary, then the service
+ will return a continuation token for retrieving the remainder of the results. For this reason,
+ it is possible that the service will return fewer results than specified by maxresults, or than
+ the default of 5000.
+ :type maxresults: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FilterBlobSegment, or the result of cls(response) + :rtype: ~azure.storage.blob.models.FilterBlobSegment + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "blobs" + accept = "application/xml" + + # Construct URL + url = self.filter_blobs.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if where is not None: + query_parameters['where'] = self._serialize.query("where", where, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('FilterBlobSegment', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_lease.py b/azure/multiapi/storagev2/blob/v2020_06_12/_lease.py new file mode 100644 index 0000000..d495d6e --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_lease.py @@ -0,0 +1,331 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import uuid + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, TypeVar, TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator import distributed_trace + +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._serialize import get_modify_conditions + +if TYPE_CHECKING: + from datetime import datetime + + BlobClient = TypeVar("BlobClient") + ContainerClient = TypeVar("ContainerClient") + + +class BlobLeaseClient(object): + """Creates a new BlobLeaseClient. + + This client provides lease operations on a BlobClient or ContainerClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the blob or container to lease. + :type client: ~azure.storage.blob.BlobClient or + ~azure.storage.blob.ContainerClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. + """ + def __init__( + self, client, lease_id=None + ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs + # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None + self.id = lease_id or str(uuid.uuid4()) + self.last_modified = None + self.etag = None + if hasattr(client, 'blob_name'): + self._client = client._client.blob # type: ignore # pylint: disable=protected-access + elif hasattr(client, 'container_name'): + self._client = client._client.container # type: ignore # pylint: disable=protected-access + else: + raise TypeError("Lease must use either BlobClient or ContainerClient.") + + def __enter__(self): + return self + + def __exit__(self, *args): + self.release() + + @distributed_trace + def acquire(self, lease_duration=-1, **kwargs): + # type: (int, **Any) -> None + """Requests a new lease. + + If the container does not have an active lease, the Blob service creates a + lease on the container and returns a new lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace + def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the container or blob. Note that + the lease may be renewed even if it has expired as long as the container + or blob has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the container or blob. Releasing the lease allows another client + to immediately acquire the lease for the container or blob as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.release_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.change_lease( + lease_id=self.id, + proposed_lease_id=proposed_lease_id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def break_lease(self, lease_break_period=None, **kwargs): + # type: (Optional[int], Any) -> int + """Break the lease, if the container or blob has an active lease. + + Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. When a lease + is broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the container or blob. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :param int lease_break_period: + This is the proposed duration of seconds that the lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the lease. If longer, the time remaining on the lease is used. + A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. 
``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. + :rtype: int + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.break_lease( + timeout=kwargs.pop('timeout', None), + break_period=lease_break_period, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_06_12/_list_blobs_helper.py new file mode 100644 index 0000000..309d37b --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_list_blobs_helper.py @@ -0,0 +1,236 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from azure.core.paging import PageIterator, ItemPaged +from azure.core.exceptions import HttpResponseError +from ._deserialize import get_blob_properties_from_generated_code, parse_tags +from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix, FilterBlobItem +from ._models import BlobProperties, FilteredBlob +from ._shared.models import DictMixin +from ._shared.response_handlers import return_context_and_deserialized, process_storage_error + + +class BlobPropertiesPaged(PageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
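+
+ Illustrative sketch: instances are produced by the listing APIs (for
+ example ``ContainerClient.list_blobs``) rather than constructed directly.
+ The connection string and container name are placeholders.
+
+ .. code-block:: python
+
+     from azure.multiapi.storagev2.blob.v2020_06_12 import ContainerClient
+
+     container = ContainerClient.from_connection_string(
+         "<connection-string>", container_name="<container>")
+     for blob in container.list_blobs(name_starts_with="logs/"):
+         print(blob.name, blob.size)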
+ """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + delimiter=None, + location_mode=None): + super(BlobPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.delimiter = delimiter + self.current_page = None + self.location_mode = location_mode + + def _get_next_cb(self, continuation_token): + try: + return self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.container = self._response.container_name + self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] + + return self._response.next_marker or None, self.current_page + + def _build_item(self, item): + if isinstance(item, BlobProperties): + return item + if isinstance(item, BlobItemInternal): + blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access + blob.container = self.container + return blob + return item + + +class BlobPrefixPaged(BlobPropertiesPaged): + def __init__(self, *args, **kwargs): + super(BlobPrefixPaged, self).__init__(*args, **kwargs) + self.name = self.prefix + + def _extract_data_cb(self, get_next_return): + continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) + self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items + self.current_page = [self._build_item(item) for item in self.current_page] + self.delimiter = self._response.delimiter + + return continuation_token, self.current_page + + def _build_item(self, item): + item = super(BlobPrefixPaged, self)._build_item(item) + if isinstance(item, GenBlobPrefix): + return BlobPrefix( + self._command, + container=self.container, + prefix=item.name, + results_per_page=self.results_per_page, + location_mode=self.location_mode) + return item + + +class BlobPrefix(ItemPaged, DictMixin): + """An Iterable of Blob properties. + + Returned from walk_blobs when a delimiter is used. + Can be thought of as a virtual blob directory. + + :ivar str name: The prefix, or "directory name" of the blob. + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str next_marker: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. 
+ :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str marker: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. + """ + def __init__(self, *args, **kwargs): + super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) + self.name = kwargs.get('prefix') + self.prefix = kwargs.get('prefix') + self.results_per_page = kwargs.get('results_per_page') + self.container = kwargs.get('container') + self.delimiter = kwargs.get('delimiter') + self.location_mode = kwargs.get('location_mode') + + +class FilteredBlobPaged(PageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.FilteredBlob) + :ivar str container: The container that the blobs are listed from. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
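+
+ Illustrative sketch: ``BlobServiceClient.find_blobs_by_tags`` pages its
+ results through this class. The connection string and the tag filter
+ below are placeholders.
+
+ .. code-block:: python
+
+     from azure.multiapi.storagev2.blob.v2020_06_12 import BlobServiceClient
+
+     service = BlobServiceClient.from_connection_string("<connection-string>")
+     for blob in service.find_blobs_by_tags("\"env\" = 'prod'"):
+         print(blob.name, blob.container_name, blob.tags)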
+ """ + def __init__( + self, command, + container=None, + results_per_page=None, + continuation_token=None, + location_mode=None): + super(FilteredBlobPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.marker = continuation_token + self.results_per_page = results_per_page + self.container = container + self.current_page = None + self.location_mode = location_mode + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.marker = self._response.next_marker + self.current_page = [self._build_item(item) for item in self._response.blobs] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + if isinstance(item, FilterBlobItem): + tags = parse_tags(item.tags) + blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) + return blob + return item diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_models.py b/azure/multiapi/storagev2/blob/v2020_06_12/_models.py new file mode 100644 index 0000000..00a53dc --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_models.py @@ -0,0 +1,1111 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines + +from enum import Enum + +from azure.core.paging import PageIterator +from azure.core.exceptions import HttpResponseError +from ._generated.models import ArrowField + +from ._shared import decode_base64_to_text +from ._shared.response_handlers import return_context_and_deserialized, process_storage_error +from ._shared.models import DictMixin, get_enum_value +from ._generated.models import Logging as GeneratedLogging +from ._generated.models import Metrics as GeneratedMetrics +from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy +from ._generated.models import StaticWebsite as GeneratedStaticWebsite +from ._generated.models import CorsRule as GeneratedCorsRule +from ._generated.models import AccessPolicy as GenAccessPolicy + + +class BlobType(str, Enum): + + BlockBlob = "BlockBlob" + PageBlob = "PageBlob" + AppendBlob = "AppendBlob" + + +class BlockState(str, Enum): + """Block blob block types.""" + + Committed = 'Committed' #: Committed blocks. + Latest = 'Latest' #: Latest blocks. + Uncommitted = 'Uncommitted' #: Uncommitted blocks. + + +class StandardBlobTier(str, Enum): + """ + Specifies the blob tier to set the blob to. This is only applicable for + block blobs on standard storage accounts. + """ + + Archive = 'Archive' #: Archive + Cool = 'Cool' #: Cool + Hot = 'Hot' #: Hot + + +class PremiumPageBlobTier(str, Enum): + """ + Specifies the page blob tier to set the blob to. 
This is only applicable to page + blobs on premium storage accounts. Please take a look at: + https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets + for detailed information on the corresponding IOPS and throughput per PageBlobTier. + """ + + P4 = 'P4' #: P4 Tier + P6 = 'P6' #: P6 Tier + P10 = 'P10' #: P10 Tier + P20 = 'P20' #: P20 Tier + P30 = 'P30' #: P30 Tier + P40 = 'P40' #: P40 Tier + P50 = 'P50' #: P50 Tier + P60 = 'P60' #: P60 Tier + + +class SequenceNumberAction(str, Enum): + """Sequence number actions.""" + + Increment = 'increment' + """ + Increments the value of the sequence number by 1. If specifying this option, + do not include the x-ms-blob-sequence-number header. + """ + + Max = 'max' + """ + Sets the sequence number to be the higher of the value included with the + request and the value currently stored for the blob. + """ + + Update = 'update' + """Sets the sequence number to the value included with the request.""" + + +class PublicAccess(str, Enum): + """ + Specifies whether data in the container may be accessed publicly and the level of access. + """ + + OFF = 'off' + """ + Specifies that there is no public read access for both the container and blobs within the container. + Clients cannot enumerate the containers within the storage account as well as the blobs within the container. + """ + + Blob = 'blob' + """ + Specifies public read access for blobs. Blob data within this container can be read + via anonymous request, but container data is not available. Clients cannot enumerate + blobs within the container via anonymous request. + """ + + Container = 'container' + """ + Specifies full public read access for container and blob data. Clients can enumerate + blobs within the container via anonymous request, but cannot enumerate containers + within the storage account. + """ + + +class BlobAnalyticsLogging(GeneratedLogging): + """Azure Analytics Logging settings. + + :keyword str version: + The version of Storage Analytics to configure. The default value is 1.0. + :keyword bool delete: + Indicates whether all delete requests should be logged. The default value is `False`. + :keyword bool read: + Indicates whether all read requests should be logged. The default value is `False`. + :keyword bool write: + Indicates whether all write requests should be logged. The default value is `False`. + :keyword ~azure.storage.blob.RetentionPolicy retention_policy: + Determines how long the associated data should persist. If not specified the retention + policy will be disabled by default. + """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.delete = kwargs.get('delete', False) + self.read = kwargs.get('read', False) + self.write = kwargs.get('write', False) + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + delete=generated.delete, + read=generated.read, + write=generated.write, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class Metrics(GeneratedMetrics): + """A summary of request statistics grouped by API in hour or minute aggregates + for blobs. + + :keyword str version: + The version of Storage Analytics to configure. The default value is 1.0. + :keyword bool enabled: + Indicates whether metrics are enabled for the Blob service. 
+ The default value is `False`. + :keyword bool include_apis: + Indicates whether metrics should generate summary statistics for called API operations. + :keyword ~azure.storage.blob.RetentionPolicy retention_policy: + Determines how long the associated data should persist. If not specified the retention + policy will be disabled by default. + """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.enabled = kwargs.get('enabled', False) + self.include_apis = kwargs.get('include_apis') + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + enabled=generated.enabled, + include_apis=generated.include_apis, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class RetentionPolicy(GeneratedRetentionPolicy): + """The retention policy which determines how long the associated data should + persist. + + :param bool enabled: + Indicates whether a retention policy is enabled for the storage service. + The default value is False. + :param int days: + Indicates the number of days that metrics or logging or + soft-deleted data should be retained. All data older than this value will + be deleted. If enabled=True, the number of days must be specified. + """ + + def __init__(self, enabled=False, days=None): + super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None) + if self.enabled and (self.days is None): + raise ValueError("If policy is enabled, 'days' must be specified.") + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + days=generated.days, + ) + + +class StaticWebsite(GeneratedStaticWebsite): + """The properties that enable an account to host a static website. + + :keyword bool enabled: + Indicates whether this account is hosting a static website. + The default value is `False`. + :keyword str index_document: + The default name of the index page under each directory. + :keyword str error_document404_path: + The absolute path of the custom 404 page. + :keyword str default_index_document_path: + Absolute path of the default index page. + """ + + def __init__(self, **kwargs): + self.enabled = kwargs.get('enabled', False) + if self.enabled: + self.index_document = kwargs.get('index_document') + self.error_document404_path = kwargs.get('error_document404_path') + self.default_index_document_path = kwargs.get('default_index_document_path') + else: + self.index_document = None + self.error_document404_path = None + self.default_index_document_path = None + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + index_document=generated.index_document, + error_document404_path=generated.error_document404_path, + default_index_document_path=generated.default_index_document_path + ) + + +class CorsRule(GeneratedCorsRule): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. Web browsers implement a + security restriction known as same-origin policy that prevents a web page + from calling APIs in a different domain; CORS provides a secure way to + allow one domain (the origin domain) to call APIs in another domain. 
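These analytics and CORS settings classes are typically applied together through BlobServiceClient.set_service_properties. A minimal sketch with placeholder account values (the CorsRule parameters are documented just below):

    from azure.multiapi.storagev2.blob.v2020_06_12 import (
        BlobServiceClient, BlobAnalyticsLogging, Metrics, RetentionPolicy, CorsRule)

    service = BlobServiceClient("https://<account>.blob.core.windows.net", credential="<account-key>")

    # Retain logs and metrics for 5 days; allow anonymous GET from any origin.
    retention = RetentionPolicy(enabled=True, days=5)
    service.set_service_properties(
        analytics_logging=BlobAnalyticsLogging(
            read=True, write=True, delete=True, retention_policy=retention),
        hour_metrics=Metrics(enabled=True, include_apis=True, retention_policy=retention),
        cors=[CorsRule(allowed_origins=['*'], allowed_methods=['GET'])],
    )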
+
+    :param list(str) allowed_origins:
+        A list of origin domains that will be allowed via CORS, or "*" to allow
+        all domains. The list must contain at least one entry. Limited to 64
+        origin domains. Each allowed origin can have up to 256 characters.
+    :param list(str) allowed_methods:
+        A list of HTTP methods that are allowed to be executed by the origin.
+        The list must contain at least one entry. For Azure Storage,
+        permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
+    :keyword list(str) allowed_headers:
+        Defaults to an empty list. A list of headers allowed to be part of
+        the cross-origin request. Limited to 64 defined headers and two prefixed
+        headers. Each header can be up to 256 characters.
+    :keyword list(str) exposed_headers:
+        Defaults to an empty list. A list of response headers to expose to CORS
+        clients. Limited to 64 defined headers and two prefixed headers. Each
+        header can be up to 256 characters.
+    :keyword int max_age_in_seconds:
+        The number of seconds that the client/browser should cache a
+        preflight response.
+    """
+
+    def __init__(self, allowed_origins, allowed_methods, **kwargs):
+        self.allowed_origins = ','.join(allowed_origins)
+        self.allowed_methods = ','.join(allowed_methods)
+        self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
+        self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
+        self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
+
+    @classmethod
+    def _from_generated(cls, generated):
+        return cls(
+            [generated.allowed_origins],
+            [generated.allowed_methods],
+            allowed_headers=[generated.allowed_headers],
+            exposed_headers=[generated.exposed_headers],
+            max_age_in_seconds=generated.max_age_in_seconds,
+        )
+
+
+class ContainerProperties(DictMixin):
+    """Blob container's properties class.
+
+    Returned ``ContainerProperties`` instances expose these values through a
+    dictionary interface, for example: ``container_props["last_modified"]``.
+    Additionally, the container name is available as ``container_props["name"]``.
+
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the container was modified.
+    :ivar str etag:
+        The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar ~azure.storage.blob.LeaseProperties lease:
+        Stores all the lease information for the container.
+    :ivar str public_access: Specifies whether data in the container may be accessed
+        publicly and the level of access.
+    :ivar bool has_immutability_policy:
+        Represents whether the container has an immutability policy.
+    :ivar bool has_legal_hold:
+        Represents whether the container has a legal hold.
+    :ivar dict metadata: A dict with name-value pairs to associate with the
+        container as metadata.
+    :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope:
+        The default encryption scope configuration for the container.
+    :ivar bool deleted:
+        Whether this container was deleted.
+    :ivar str version:
+        The version of a deleted container.
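A short sketch of the dictionary-style access described above, assuming a placeholder account and an existing container:

    from azure.multiapi.storagev2.blob.v2020_06_12 import BlobServiceClient

    service = BlobServiceClient("https://<account>.blob.core.windows.net", credential="<account-key>")
    props = service.get_container_client("<container>").get_container_properties()

    # DictMixin makes attribute access and key access interchangeable.
    print(props.last_modified)
    print(props["last_modified"])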
+ """ + + def __init__(self, **kwargs): + self.name = None + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.lease = LeaseProperties(**kwargs) + self.public_access = kwargs.get('x-ms-blob-public-access') + self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy') + self.deleted = None + self.version = None + self.has_legal_hold = kwargs.get('x-ms-has-legal-hold') + self.metadata = kwargs.get('metadata') + self.encryption_scope = None + default_encryption_scope = kwargs.get('x-ms-default-encryption-scope') + if default_encryption_scope: + self.encryption_scope = ContainerEncryptionScope( + default_encryption_scope=default_encryption_scope, + prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False) + ) + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.last_modified = generated.properties.last_modified + props.etag = generated.properties.etag + props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + props.public_access = generated.properties.public_access + props.has_immutability_policy = generated.properties.has_immutability_policy + props.deleted = generated.deleted + props.version = generated.version + props.has_legal_hold = generated.properties.has_legal_hold + props.metadata = generated.metadata + props.encryption_scope = ContainerEncryptionScope._from_generated(generated) #pylint: disable=protected-access + return props + + +class ContainerPropertiesPaged(PageIterator): + """An Iterable of Container properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A container name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.ContainerProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only containers whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of container names to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
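A minimal paging sketch for this iterator, showing results_per_page and resuming from a continuation token; account values are placeholders:

    from azure.multiapi.storagev2.blob.v2020_06_12 import BlobServiceClient

    service = BlobServiceClient("https://<account>.blob.core.windows.net", credential="<account-key>")

    # Fetch one page of up to 5 containers, then resume later from the token.
    pages = service.list_containers(results_per_page=5).by_page()
    first_page = list(next(pages))
    token = pages.continuation_token
    resumed = service.list_containers(results_per_page=5).by_page(continuation_token=token)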
+    """
+    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
+        super(ContainerPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [self._build_item(item) for item in self._response.container_items]
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        return ContainerProperties._from_generated(item)  # pylint: disable=protected-access
+
+
+class BlobProperties(DictMixin):
+    """
+    Blob Properties.
+
+    :ivar str name:
+        The name of the blob.
+    :ivar str container:
+        The container in which the blob resides.
+    :ivar str snapshot:
+        Datetime value that uniquely identifies the blob snapshot.
+    :ivar ~azure.storage.blob.BlobType blob_type:
+        String indicating this blob's type.
+    :ivar dict metadata:
+        Name-value pairs associated with the blob as metadata.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the blob was modified.
+    :ivar str etag:
+        The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar int size:
+        The size of the content returned. If the entire blob was requested,
+        the length of blob in bytes. If a subset of the blob was requested, the
+        length of the returned subset.
+    :ivar str content_range:
+        Indicates the range of bytes returned in the event that the client
+        requested a subset of the blob.
+    :ivar int append_blob_committed_block_count:
+        (For Append Blobs) Number of committed blocks in the blob.
+    :ivar bool is_append_blob_sealed:
+        Indicates whether the append blob is sealed.
+
+        .. versionadded:: 12.4.0
+
+    :ivar int page_blob_sequence_number:
+        (For Page Blobs) Sequence number for page blob used for coordinating
+        concurrent writes.
+    :ivar bool server_encrypted:
+        Set to true if the blob is encrypted on the server.
+    :ivar ~azure.storage.blob.CopyProperties copy:
+        Stores all the copy properties for the blob.
+    :ivar ~azure.storage.blob.ContentSettings content_settings:
+        Stores all the content settings for the blob.
+    :ivar ~azure.storage.blob.LeaseProperties lease:
+        Stores all the lease information for the blob.
+    :ivar ~azure.storage.blob.StandardBlobTier blob_tier:
+        Indicates the access tier of the blob. The hot tier is optimized
+        for storing data that is accessed frequently. The cool storage tier
+        is optimized for storing data that is infrequently accessed and stored
+        for at least a month. The archive tier is optimized for storing
+        data that is rarely accessed and stored for at least six months
+        with flexible latency requirements.
+ :ivar str rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :ivar ~datetime.datetime blob_tier_change_time: + Indicates when the access tier was last changed. + :ivar bool blob_tier_inferred: + Indicates whether the access tier was inferred by the service. + If false, it indicates that the tier was set explicitly. + :ivar bool deleted: + Whether this blob was deleted. + :ivar ~datetime.datetime deleted_time: + A datetime object representing the time at which the blob was deleted. + :ivar int remaining_retention_days: + The number of days that the blob will be retained before being permanently deleted by the service. + :ivar ~datetime.datetime creation_time: + Indicates when the blob was created, in UTC. + :ivar str archive_status: + Archive status of blob. + :ivar str encryption_key_sha256: + The SHA-256 hash of the provided encryption key. + :ivar str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + :ivar bool request_server_encrypted: + Whether this blob is encrypted. + :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties: + Only present for blobs that have policy ids and rule ids applied to them. + + .. versionadded:: 12.4.0 + + :ivar str object_replication_destination_policy: + Represents the Object Replication Policy Id that created this blob. + + .. versionadded:: 12.4.0 + + :ivar ~datetime.datetime last_accessed_on: + Indicates when the last Read/Write operation was performed on a Blob. + + .. versionadded:: 12.6.0 + + :ivar int tag_count: + Tags count on this blob. + + .. versionadded:: 12.4.0 + + :ivar dict(str, str) tags: + Key value pair of tags on this blob. + + .. 
versionadded:: 12.4.0 + + """ + + def __init__(self, **kwargs): + self.name = kwargs.get('name') + self.container = None + self.snapshot = kwargs.get('x-ms-snapshot') + self.version_id = kwargs.get('x-ms-version-id') + self.is_current_version = kwargs.get('x-ms-is-current-version') + self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None + self.metadata = kwargs.get('metadata') + self.encrypted_metadata = kwargs.get('encrypted_metadata') + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.size = kwargs.get('Content-Length') + self.content_range = kwargs.get('Content-Range') + self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') + self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed') + self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') + self.server_encrypted = kwargs.get('x-ms-server-encrypted') + self.copy = CopyProperties(**kwargs) + self.content_settings = ContentSettings(**kwargs) + self.lease = LeaseProperties(**kwargs) + self.blob_tier = kwargs.get('x-ms-access-tier') + self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority') + self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') + self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') + self.deleted = False + self.deleted_time = None + self.remaining_retention_days = None + self.creation_time = kwargs.get('x-ms-creation-time') + self.archive_status = kwargs.get('x-ms-archive-status') + self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256') + self.encryption_scope = kwargs.get('x-ms-encryption-scope') + self.request_server_encrypted = kwargs.get('x-ms-server-encrypted') + self.object_replication_source_properties = kwargs.get('object_replication_source_properties') + self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id') + self.last_accessed_on = kwargs.get('x-ms-last-access-time') + self.tag_count = kwargs.get('x-ms-tag-count') + self.tags = None + + +class FilteredBlob(DictMixin): + """Blob info from a Filter Blobs API call. + + :ivar name: Blob name + :type name: str + :ivar container_name: Container name. + :type container_name: str + :ivar tags: Key value pairs of blob tags. + :type tags: Dict[str, str] + """ + def __init__(self, **kwargs): + self.name = kwargs.get('name', None) + self.container_name = kwargs.get('container_name', None) + self.tags = kwargs.get('tags', None) + + +class LeaseProperties(DictMixin): + """Blob Lease Properties. + + :ivar str status: + The lease status of the blob. Possible values: locked|unlocked + :ivar str state: + Lease state of the blob. Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a blob is leased, specifies whether the lease is of infinite or fixed duration. + """ + + def __init__(self, **kwargs): + self.status = get_enum_value(kwargs.get('x-ms-lease-status')) + self.state = get_enum_value(kwargs.get('x-ms-lease-state')) + self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) + + @classmethod + def _from_generated(cls, generated): + lease = cls() + lease.status = get_enum_value(generated.properties.lease_status) + lease.state = get_enum_value(generated.properties.lease_state) + lease.duration = get_enum_value(generated.properties.lease_duration) + return lease + + +class ContentSettings(DictMixin): + """The content settings of a blob. + + :param str content_type: + The content type specified for the blob. 
If no content type was + specified, the default content type is application/octet-stream. + :param str content_encoding: + If the content_encoding has previously been set + for the blob, that value is stored. + :param str content_language: + If the content_language has previously been set + for the blob, that value is stored. + :param str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the blob, that value is stored. + :param str cache_control: + If the cache_control has previously been set for + the blob, that value is stored. + :param bytearray content_md5: + If the content_md5 has been set for the blob, this response + header is stored so that the client can check for message content + integrity. + """ + + def __init__( + self, content_type=None, content_encoding=None, + content_language=None, content_disposition=None, + cache_control=None, content_md5=None, **kwargs): + + self.content_type = content_type or kwargs.get('Content-Type') + self.content_encoding = content_encoding or kwargs.get('Content-Encoding') + self.content_language = content_language or kwargs.get('Content-Language') + self.content_md5 = content_md5 or kwargs.get('Content-MD5') + self.content_disposition = content_disposition or kwargs.get('Content-Disposition') + self.cache_control = cache_control or kwargs.get('Cache-Control') + + @classmethod + def _from_generated(cls, generated): + settings = cls() + settings.content_type = generated.properties.content_type or None + settings.content_encoding = generated.properties.content_encoding or None + settings.content_language = generated.properties.content_language or None + settings.content_md5 = generated.properties.content_md5 or None + settings.content_disposition = generated.properties.content_disposition or None + settings.cache_control = generated.properties.cache_control or None + return settings + + +class CopyProperties(DictMixin): + """Blob Copy Properties. + + These properties will be `None` if this blob has never been the destination + in a Copy Blob operation, or if this blob has been modified after a concluded + Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List. + + :ivar str id: + String identifier for the last attempted Copy Blob operation where this blob + was the destination blob. + :ivar str source: + URL up to 2 KB in length that specifies the source blob used in the last attempted + Copy Blob operation where this blob was the destination blob. + :ivar str status: + State of the copy operation identified by Copy ID, with these values: + success: + Copy completed successfully. + pending: + Copy is in progress. Check copy_status_description if intermittent, + non-fatal errors impede copy progress but don't cause failure. + aborted: + Copy was ended by Abort Copy Blob. + failed: + Copy failed. See copy_status_description for failure details. + :ivar str progress: + Contains the number of bytes copied and the total bytes in the source in the last + attempted Copy Blob operation where this blob was the destination blob. Can show + between 0 and Content-Length bytes copied. + :ivar ~datetime.datetime completion_time: + Conclusion time of the last attempted Copy Blob operation where this blob was the + destination blob. This value can specify the time of a completed, aborted, or + failed copy attempt. 
+ :ivar str status_description: + Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal + or non-fatal copy operation failure. + :ivar bool incremental_copy: + Copies the snapshot of the source page blob to a destination page blob. + The snapshot is copied such that only the differential changes between + the previously copied snapshot are transferred to the destination + :ivar ~datetime.datetime destination_snapshot: + Included if the blob is incremental copy blob or incremental copy snapshot, + if x-ms-copy-status is success. Snapshot time of the last successful + incremental copy snapshot for this blob. + """ + + def __init__(self, **kwargs): + self.id = kwargs.get('x-ms-copy-id') + self.source = kwargs.get('x-ms-copy-source') + self.status = get_enum_value(kwargs.get('x-ms-copy-status')) + self.progress = kwargs.get('x-ms-copy-progress') + self.completion_time = kwargs.get('x-ms-copy-completion_time') + self.status_description = kwargs.get('x-ms-copy-status-description') + self.incremental_copy = kwargs.get('x-ms-incremental-copy') + self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot') + + @classmethod + def _from_generated(cls, generated): + copy = cls() + copy.id = generated.properties.copy_id or None + copy.status = get_enum_value(generated.properties.copy_status) or None + copy.source = generated.properties.copy_source or None + copy.progress = generated.properties.copy_progress or None + copy.completion_time = generated.properties.copy_completion_time or None + copy.status_description = generated.properties.copy_status_description or None + copy.incremental_copy = generated.properties.incremental_copy or None + copy.destination_snapshot = generated.properties.destination_snapshot or None + return copy + + +class BlobBlock(DictMixin): + """BlockBlob Block class. + + :param str block_id: + Block id. + :param str state: + Block state. Possible values: committed|uncommitted + :ivar int size: + Block size in bytes. + """ + + def __init__(self, block_id, state=BlockState.Latest): + self.id = block_id + self.state = state + self.size = None + + @classmethod + def _from_generated(cls, generated): + block = cls(decode_base64_to_text(generated.name)) + block.size = generated.size + return block + + +class PageRange(DictMixin): + """Page Range for page blob. + + :param int start: + Start of page range in bytes. + :param int end: + End of page range in bytes. + """ + + def __init__(self, start=None, end=None): + self.start = start + self.end = end + + +class AccessPolicy(GenAccessPolicy): + """Access Policy class used by the set and get access policy methods in each service. + + A stored access policy can specify the start time, expiry time, and + permissions for the Shared Access Signatures with which it's associated. + Depending on how you want to control access to your resource, you can + specify all of these parameters within the stored access policy, and omit + them from the URL for the Shared Access Signature. Doing so permits you to + modify the associated signature's behavior at any time, as well as to revoke + it. Or you can specify one or more of the access policy parameters within + the stored access policy, and the others on the URL. Finally, you can + specify all of the parameters on the URL. In this case, you can use the + stored access policy to revoke the signature, but not to modify its behavior. 
+ + Together the Shared Access Signature and the stored access policy must + include all fields required to authenticate the signature. If any required + fields are missing, the request will fail. Likewise, if a field is specified + both in the Shared Access Signature URL and in the stored access policy, the + request will fail with status code 400 (Bad Request). + + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.ContainerSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + """ + def __init__(self, permission=None, expiry=None, start=None): + self.start = start + self.expiry = expiry + self.permission = permission + + +class ContainerSasPermissions(object): + """ContainerSasPermissions class to be used with the + :func:`~azure.storage.blob.generate_container_sas` function and + for the AccessPolicies used with + :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. + + :param bool read: + Read the content, properties, metadata or block list of any blob in the + container. Use any blob in the container as the source of a copy operation. + :param bool write: + For any blob in the container, create or write content, properties, + metadata, or block list. Snapshot or lease the blob. Resize the blob + (page blob only). Use the blob as the destination of a copy operation + within the same account. Note: You cannot grant permissions to read or + write container properties or metadata, nor to lease a container, with + a container SAS. Use an account SAS instead. + :param bool delete: + Delete any blob in the container. Note: You cannot grant permissions to + delete a container with a container SAS. Use an account SAS instead. + :param bool delete_previous_version: + Delete the previous blob version for the versioning enabled storage account. + :param bool list: + List blobs in the container. + :param bool tag: + Set or get tags on the blobs in the container. 
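A minimal sketch combining AccessPolicy and ContainerSasPermissions: store a policy on a container, then issue a SAS that references it by id. Account values are placeholders:

    from datetime import datetime, timedelta
    from azure.multiapi.storagev2.blob.v2020_06_12 import (
        BlobServiceClient, AccessPolicy, ContainerSasPermissions, generate_container_sas)

    service = BlobServiceClient("https://<account>.blob.core.windows.net", credential="<account-key>")
    container = service.get_container_client("<container>")

    # ContainerSasPermissions.from_string("rl") is equivalent to the
    # constructor call below.
    policy = AccessPolicy(permission=ContainerSasPermissions(read=True, list=True),
                          expiry=datetime.utcnow() + timedelta(hours=1))
    container.set_container_access_policy(signed_identifiers={'readonly': policy})

    # The SAS inherits its permissions and expiry from the stored policy.
    sas = generate_container_sas(service.account_name, "<container>",
                                 account_key="<account-key>", policy_id='readonly')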
+ """ + def __init__(self, read=False, write=False, delete=False, list=False, delete_previous_version=False, tag=False): # pylint: disable=redefined-builtin + self.read = read + self.write = write + self.delete = delete + self.list = list + self.delete_previous_version = delete_previous_version + self.tag = tag + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('l' if self.list else '') + + ('t' if self.tag else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create a ContainerSasPermissions from a string. + + To specify read, write, delete, or list permissions you need only to + include the first letter of the word in the string. E.g. For read and + write permissions, you would provide a string "rw". + + :param str permission: The string which dictates the read, write, delete, + and list permissions. + :return: A ContainerSasPermissions object + :rtype: ~azure.storage.blob.ContainerSasPermissions + """ + p_read = 'r' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_list = 'l' in permission + p_delete_previous_version = 'x' in permission + p_tag = 't' in permission + parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list, + delete_previous_version=p_delete_previous_version, tag=p_tag) + + return parsed + + +class BlobSasPermissions(object): + """BlobSasPermissions class to be used with the + :func:`~azure.storage.blob.generate_blob_sas` function. + + :param bool read: + Read the content, properties, metadata and block list. Use the blob as + the source of a copy operation. + :param bool add: + Add a block to an append blob. + :param bool create: + Write a new blob, snapshot a blob, or copy a blob to a new blob. + :param bool write: + Create or write content, properties, metadata, or block list. Snapshot + or lease the blob. Resize the blob (page blob only). Use the blob as the + destination of a copy operation within the same account. + :param bool delete: + Delete the blob. + :param bool delete_previous_version: + Delete the previous blob version for the versioning enabled storage account. + :param bool tag: + Set or get tags on the blob. + """ + def __init__(self, read=False, add=False, create=False, write=False, + delete=False, delete_previous_version=False, tag=True): + self.read = read + self.add = add + self.create = create + self.write = write + self.delete = delete + self.delete_previous_version = delete_previous_version + self.tag = tag + self._str = (('r' if self.read else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('t' if self.tag else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create a BlobSasPermissions from a string. + + To specify read, add, create, write, or delete permissions you need only to + include the first letter of the word in the string. E.g. For read and + write permissions, you would provide a string "rw". + + :param str permission: The string which dictates the read, add, create, + write, or delete permissions. 
+ :return: A BlobSasPermissions object + :rtype: ~azure.storage.blob.BlobSasPermissions + """ + p_read = 'r' in permission + p_add = 'a' in permission + p_create = 'c' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_delete_previous_version = 'x' in permission + p_tag = 't' in permission + + parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete, + delete_previous_version=p_delete_previous_version, tag=p_tag) + + return parsed + + +class CustomerProvidedEncryptionKey(object): + """ + All data in Azure Storage is encrypted at-rest using an account-level encryption key. + In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents + and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service. + + When you use a customer-provided key, Azure Storage does not manage or persist your key. + When writing data to a blob, the provided key is used to encrypt your data before writing it to disk. + A SHA-256 hash of the encryption key is written alongside the blob contents, + and is used to verify that all subsequent operations against the blob use the same encryption key. + This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob. + When reading a blob, the provided key is used to decrypt your data after reading it from disk. + In both cases, the provided encryption key is securely discarded + as soon as the encryption or decryption process completes. + + :param str key_value: + Base64-encoded AES-256 encryption key value. + :param str key_hash: + Base64-encoded SHA256 of the encryption key. + :ivar str algorithm: + Specifies the algorithm to use when encrypting data using the given key. Must be AES256. + """ + def __init__(self, key_value, key_hash): + self.key_value = key_value + self.key_hash = key_hash + self.algorithm = 'AES256' + + +class ContainerEncryptionScope(object): + """The default encryption scope configuration for a container. + + This scope is used implicitly for all future writes within the container, + but can be overridden per blob operation. + + .. versionadded:: 12.2.0 + + :param str default_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + :param bool prevent_encryption_scope_override: + If true, prevents any request from specifying a different encryption scope than the scope + set on the container. Default value is false. + """ + + def __init__(self, default_encryption_scope, **kwargs): + self.default_encryption_scope = default_encryption_scope + self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False) + + @classmethod + def _from_generated(cls, generated): + if generated.properties.default_encryption_scope: + scope = cls( + generated.properties.default_encryption_scope, + prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False + ) + return scope + return None + + +class DelimitedJsonDialect(object): + """Defines the input or output JSON serialization for a blob data query. + + :keyword str delimiter: The line separator character, default value is '\n' + """ + + def __init__(self, **kwargs): + self.delimiter = kwargs.pop('delimiter', '\n') + + +class DelimitedTextDialect(object): + """Defines the input or output delimited (CSV) serialization for a blob query request. + + :keyword str delimiter: + Column separator, defaults to ','. 
+ :keyword str quotechar: + Field quote, defaults to '"'. + :keyword str lineterminator: + Record separator, defaults to '\n'. + :keyword str escapechar: + Escape char, defaults to empty. + :keyword bool has_header: + Whether the blob data includes headers in the first line. The default value is False, meaning that the + data will be returned inclusive of the first line. If set to True, the data will be returned exclusive + of the first line. + """ + def __init__(self, **kwargs): + self.delimiter = kwargs.pop('delimiter', ',') + self.quotechar = kwargs.pop('quotechar', '"') + self.lineterminator = kwargs.pop('lineterminator', '\n') + self.escapechar = kwargs.pop('escapechar', "") + self.has_header = kwargs.pop('has_header', False) + + +class ArrowDialect(ArrowField): + """field of an arrow schema. + + All required parameters must be populated in order to send to Azure. + + :param ~azure.storage.blob.ArrowType type: Arrow field type. + :keyword str name: The name of the field. + :keyword int precision: The precision of the field. + :keyword int scale: The scale of the field. + """ + def __init__(self, type, **kwargs): # pylint: disable=redefined-builtin + super(ArrowDialect, self).__init__(type=type, **kwargs) + + +class ArrowType(str, Enum): + + INT64 = "int64" + BOOL = "bool" + TIMESTAMP_MS = "timestamp[ms]" + STRING = "string" + DOUBLE = "double" + DECIMAL = 'decimal' + + +class ObjectReplicationPolicy(DictMixin): + """Policy id and rule ids applied to a blob. + + :ivar str policy_id: + Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair. + :ivar list(~azure.storage.blob.ObjectReplicationRule) rules: + Within each policy there may be multiple replication rules. + e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3 + """ + + def __init__(self, **kwargs): + self.policy_id = kwargs.pop('policy_id', None) + self.rules = kwargs.pop('rules', None) + + +class ObjectReplicationRule(DictMixin): + """Policy id and rule ids applied to a blob. + + :ivar str rule_id: + Rule id. + :ivar str status: + The status of the rule. It could be "Complete" or "Failed" + """ + + def __init__(self, **kwargs): + self.rule_id = kwargs.pop('rule_id', None) + self.status = kwargs.pop('status', None) + + +class BlobQueryError(object): + """The error happened during quick query operation. + + :ivar str error: + The name of the error. + :ivar bool is_fatal: + If true, this error prevents further query processing. More result data may be returned, + but there is no guarantee that all of the original data will be processed. + If false, this error does not prevent further query processing. + :ivar str description: + A description of the error. + :ivar int position: + The blob offset at which the error occurred. + """ + def __init__(self, error=None, is_fatal=False, description=None, position=None): + self.error = error + self.is_fatal = is_fatal + self.description = description + self.position = position diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_quick_query_helper.py b/azure/multiapi/storagev2/blob/v2020_06_12/_quick_query_helper.py new file mode 100644 index 0000000..eb51d98 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_quick_query_helper.py @@ -0,0 +1,196 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from io import BytesIO
+from typing import Union, Iterable, IO  # pylint: disable=unused-import
+
+from ._shared.avro.datafile import DataFileReader
+from ._shared.avro.avro_io import DatumReader
+
+
+class BlobQueryReader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to read query results.
+
+    :ivar str name:
+        The name of the blob being queried.
+    :ivar str container:
+        The name of the container where the blob is.
+    :ivar dict response_headers:
+        The response_headers of the quick query request.
+    :ivar bytes record_delimiter:
+        The delimiter used to separate lines, or records, within the data. The `records`
+        method will return these lines via a generator.
+    """
+
+    def __init__(
+        self,
+        name=None,
+        container=None,
+        errors=None,
+        record_delimiter='\n',
+        encoding=None,
+        headers=None,
+        response=None,
+        error_cls=None,
+    ):
+        self.name = name
+        self.container = container
+        self.response_headers = headers
+        self.record_delimiter = record_delimiter
+        self._size = 0
+        self._bytes_processed = 0
+        self._errors = errors
+        self._encoding = encoding
+        self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader())
+        self._first_result = self._process_record(next(self._parsed_results))
+        self._error_cls = error_cls
+
+    def __len__(self):
+        return self._size
+
+    def _process_record(self, result):
+        self._size = result.get('totalBytes', self._size)
+        self._bytes_processed = result.get('bytesScanned', self._bytes_processed)
+        if 'data' in result:
+            return result.get('data')
+        if 'fatal' in result:
+            error = self._error_cls(
+                error=result['name'],
+                is_fatal=result['fatal'],
+                description=result['description'],
+                position=result['position']
+            )
+            if self._errors:
+                self._errors(error)
+        return None
+
+    def _iter_stream(self):
+        if self._first_result is not None:
+            yield self._first_result
+        for next_result in self._parsed_results:
+            processed_result = self._process_record(next_result)
+            if processed_result is not None:
+                yield processed_result
+
+    def readall(self):
+        # type: () -> Union[bytes, str]
+        """Return all query results.
+
+        This operation is blocking until all data is downloaded.
+        If encoding has been configured - this will be used to decode individual
+        records as they are received.
+
+        :rtype: Union[bytes, str]
+        """
+        stream = BytesIO()
+        self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)
+        return data
+
+    def readinto(self, stream):
+        # type: (IO) -> None
+        """Download the query result to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream.
+        :returns: None
+        """
+        for record in self._iter_stream():
+            stream.write(record)
+
+    def records(self):
+        # type: () -> Iterable[Union[bytes, str]]
+        """Returns a record generator for the query result.
+
+        Records will be returned line by line.
+        If encoding has been configured - this will be used to decode individual
+        records as they are received.
+
+        :rtype: Iterable[Union[bytes, str]]
+        """
+        delimiter = self.record_delimiter.encode('utf-8')
+        for record_chunk in self._iter_stream():
+            for record in record_chunk.split(delimiter):
+                if self._encoding:
+                    yield record.decode(self._encoding)
+                else:
+                    yield record
+
+
+class QuickQueryStreamer(object):
+    """
+    File-like streaming iterator.
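For context, a minimal sketch of the public quick-query flow that feeds this streamer: BlobClient.query_blob returns a BlobQueryReader, and records() iterates results using the record delimiter described above. Blob and account names are placeholders:

    from azure.multiapi.storagev2.blob.v2020_06_12 import BlobServiceClient, DelimitedTextDialect

    service = BlobServiceClient("https://<account>.blob.core.windows.net", credential="<account-key>")
    blob = service.get_blob_client("<container>", "data.csv")

    # Query a CSV blob; records() streams the matching rows line by line.
    input_format = DelimitedTextDialect(delimiter=',', has_header=True)
    reader = blob.query_blob("SELECT _2 FROM BlobStorage WHERE _1 > 100", blob_format=input_format)
    for record in reader.records():
        print(record)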
+ """ + + def __init__(self, generator): + self.generator = generator + self.iterator = iter(generator) + self._buf = b"" + self._point = 0 + self._download_offset = 0 + self._buf_start = 0 + self.file_length = None + + def __len__(self): + return self.file_length + + def __iter__(self): + return self.iterator + + @staticmethod + def seekable(): + return True + + def __next__(self): + next_part = next(self.iterator) + self._download_offset += len(next_part) + return next_part + + next = __next__ # Python 2 compatibility. + + def tell(self): + return self._point + + def seek(self, offset, whence=0): + if whence == 0: + self._point = offset + elif whence == 1: + self._point += offset + else: + raise ValueError("whence must be 0, or 1") + if self._point < 0: + self._point = 0 # XXX is this right? + + def read(self, size): + try: + # keep reading from the generator until the buffer of this stream has enough data to read + while self._point + size > self._download_offset: + self._buf += self.__next__() + except StopIteration: + self.file_length = self._download_offset + + start_point = self._point + + # EOF + self._point = min(self._point + size, self._download_offset) + + relative_start = start_point - self._buf_start + if relative_start < 0: + raise ValueError("Buffer has dumped too much data") + relative_end = relative_start + size + data = self._buf[relative_start: relative_end] + + # dump the extra data in buffer + # buffer start--------------------16bytes----current read position + dumped_size = max(relative_end - 16 - relative_start, 0) + self._buf_start += dumped_size + self._buf = self._buf[dumped_size:] + + return data diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_serialize.py b/azure/multiapi/storagev2/blob/v2020_06_12/_serialize.py new file mode 100644 index 0000000..57f748a --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_serialize.py @@ -0,0 +1,198 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use +try: + from urllib.parse import quote +except ImportError: + from urllib2 import quote # type: ignore + +from azure.core import MatchConditions + +from ._models import ( + ContainerEncryptionScope, + DelimitedJsonDialect) +from ._generated.models import ( + ModifiedAccessConditions, + SourceModifiedAccessConditions, + CpkScopeInfo, + ContainerCpkScopeInfo, + QueryFormat, + QuerySerialization, + DelimitedTextConfiguration, + JsonTextConfiguration, + ArrowConfiguration, + QueryFormatType, + BlobTag, + BlobTags, LeaseAccessConditions +) + + +_SUPPORTED_API_VERSIONS = [ + '2019-02-02', + '2019-07-07', + '2019-10-10', + '2019-12-12', + '2020-02-10', + '2020-04-08', + '2020-06-12' +] + + +def _get_match_headers(kwargs, match_param, etag_param): + # type: (str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) + if_match = None + if_none_match = None + match_condition = kwargs.pop(match_param, None) + if match_condition == MatchConditions.IfNotModified: + if_match = kwargs.pop(etag_param, None) + if not if_match: + raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) + elif match_condition == MatchConditions.IfPresent: + if_match = '*' + elif match_condition == MatchConditions.IfModified: + if_none_match = kwargs.pop(etag_param, None) + if not if_none_match: + raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) + elif match_condition == MatchConditions.IfMissing: + if_none_match = '*' + elif match_condition is None: + if kwargs.get(etag_param): + raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) + else: + raise TypeError("Invalid match condition: {}".format(match_condition)) + return if_match, if_none_match + + +def get_access_conditions(lease): + # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] + try: + lease_id = lease.id # type: ignore + except AttributeError: + lease_id = lease # type: ignore + return LeaseAccessConditions(lease_id=lease_id) if lease_id else None + + +def get_modify_conditions(kwargs): + # type: (Dict[str, Any]) -> ModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') + return ModifiedAccessConditions( + if_modified_since=kwargs.pop('if_modified_since', None), + if_unmodified_since=kwargs.pop('if_unmodified_since', None), + if_match=if_match or kwargs.pop('if_match', None), + if_none_match=if_none_match or kwargs.pop('if_none_match', None), + if_tags=kwargs.pop('if_tags_match_condition', None) + ) + + +def get_source_conditions(kwargs): + # type: (Dict[str, Any]) -> SourceModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') + return SourceModifiedAccessConditions( + source_if_modified_since=kwargs.pop('source_if_modified_since', None), + source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), + source_if_match=if_match or kwargs.pop('source_if_match', None), + source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None), + source_if_tags=kwargs.pop('source_if_tags_match_condition', None) + ) + + +def get_cpk_scope_info(kwargs): + # type: (Dict[str, Any]) -> CpkScopeInfo + if 'encryption_scope' in kwargs: + return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope')) + return None + + +def get_container_cpk_scope_info(kwargs): + # type: (Dict[str, Any]) -> ContainerCpkScopeInfo 
+ encryption_scope = kwargs.pop('container_encryption_scope', None) + if encryption_scope: + if isinstance(encryption_scope, ContainerEncryptionScope): + return ContainerCpkScopeInfo( + default_encryption_scope=encryption_scope.default_encryption_scope, + prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override + ) + if isinstance(encryption_scope, dict): + return ContainerCpkScopeInfo( + default_encryption_scope=encryption_scope['default_encryption_scope'], + prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override') + ) + raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.") + return None + + +def get_api_version(kwargs, default): + # type: (Dict[str, Any]) -> str + api_version = kwargs.pop('api_version', None) + if api_version and api_version not in _SUPPORTED_API_VERSIONS: + versions = '\n'.join(_SUPPORTED_API_VERSIONS) + raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) + return api_version or default + + +def serialize_blob_tags_header(tags=None): + # type: (Optional[Dict[str, str]]) -> str + if tags is None: + return None + + components = list() + if tags: + for key, value in tags.items(): + components.append(quote(key, safe='.-')) + components.append('=') + components.append(quote(value, safe='.-')) + components.append('&') + + if components: + del components[-1] + + return ''.join(components) + + +def serialize_blob_tags(tags=None): + # type: (Optional[Dict[str, str]]) -> Union[BlobTags, None] + tag_list = list() + if tags: + tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()] + return BlobTags(blob_tag_set=tag_list) + + +def serialize_query_format(formater): + if isinstance(formater, DelimitedJsonDialect): + serialization_settings = JsonTextConfiguration( + record_separator=formater.delimiter + ) + qq_format = QueryFormat( + type=QueryFormatType.json, + json_text_configuration=serialization_settings) + elif hasattr(formater, 'quotechar'): # This supports a csv.Dialect as well + try: + headers = formater.has_header + except AttributeError: + headers = False + serialization_settings = DelimitedTextConfiguration( + column_separator=formater.delimiter, + field_quote=formater.quotechar, + record_separator=formater.lineterminator, + escape_char=formater.escapechar, + headers_present=headers + ) + qq_format = QueryFormat( + type=QueryFormatType.delimited, + delimited_text_configuration=serialization_settings + ) + elif isinstance(formater, list): + serialization_settings = ArrowConfiguration( + schema=formater + ) + qq_format = QueryFormat( + type=QueryFormatType.arrow, + arrow_configuration=serialization_settings) + elif not formater: + return None + else: + raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect.") + return QuerySerialization(format=qq_format) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/__init__.py new file mode 100644 index 0000000..160f882 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/__init__.py @@ -0,0 +1,56 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import base64 +import hashlib +import hmac + +try: + from urllib.parse import quote, unquote +except ImportError: + from urllib2 import quote, unquote # type: ignore + +import six + + +def url_quote(url): + return quote(url) + + +def url_unquote(url): + return unquote(url) + + +def encode_base64(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def decode_base64_to_bytes(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + return base64.b64decode(data) + + +def decode_base64_to_text(data): + decoded_bytes = decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + +def sign_string(key, string_to_sign, key_is_base64=True): + if key_is_base64: + key = decode_base64_to_bytes(key) + else: + if isinstance(key, six.text_type): + key = key.encode('utf-8') + if isinstance(string_to_sign, six.text_type): + string_to_sign = string_to_sign.encode('utf-8') + signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) + digest = signed_hmac_sha256.digest() + encoded_digest = encode_base64(digest) + return encoded_digest diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/authentication.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/authentication.py new file mode 100644 index 0000000..d04c1e4 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/authentication.py @@ -0,0 +1,142 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +import sys + +try: + from urllib.parse import urlparse, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import unquote # type: ignore + +try: + from yarl import URL +except ImportError: + pass + +try: + from azure.core.pipeline.transport import AioHttpTransport +except ImportError: + AioHttpTransport = None + +from azure.core.exceptions import ClientAuthenticationError +from azure.core.pipeline.policies import SansIOHTTPPolicy + +from . import sign_string + + +logger = logging.getLogger(__name__) + + + +# wraps a given exception with the desired exception type +def _wrap_exception(ex, desired_type): + msg = "" + if ex.args: + msg = ex.args[0] + if sys.version_info >= (3,): + # Automatic chaining in Python 3 means we keep the trace + return desired_type(msg) + # There isn't a good solution in 2 for keeping the stack trace + # in general, or that will not result in an error in 3 + # However, we can keep the previous error type and message + # TODO: In the future we will log the trace + return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) + + +class AzureSigningError(ClientAuthenticationError): + """ + Represents a fatal error when attempting to sign a request. + In general, the cause of this exception is user error. For example, the given account key is not valid. + Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
+ """ + + +# pylint: disable=no-self-use +class SharedKeyCredentialPolicy(SansIOHTTPPolicy): + + def __init__(self, account_name, account_key): + self.account_name = account_name + self.account_key = account_key + super(SharedKeyCredentialPolicy, self).__init__() + + @staticmethod + def _get_headers(request, headers_to_sign): + headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) + if 'content-length' in headers and headers['content-length'] == '0': + del headers['content-length'] + return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' + + @staticmethod + def _get_verb(request): + return request.http_request.method + '\n' + + def _get_canonicalized_resource(self, request): + uri_path = urlparse(request.http_request.url).path + try: + if isinstance(request.context.transport, AioHttpTransport) or \ + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ + isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), + AioHttpTransport): + uri_path = URL(uri_path) + return '/' + self.account_name + str(uri_path) + except TypeError: + pass + return '/' + self.account_name + uri_path + + @staticmethod + def _get_canonicalized_headers(request): + string_to_sign = '' + x_ms_headers = [] + for name, value in request.http_request.headers.items(): + if name.startswith('x-ms-'): + x_ms_headers.append((name.lower(), value)) + x_ms_headers.sort() + for name, value in x_ms_headers: + if value is not None: + string_to_sign += ''.join([name, ':', value, '\n']) + return string_to_sign + + @staticmethod + def _get_canonicalized_resource_query(request): + sorted_queries = list(request.http_request.query.items()) + sorted_queries.sort() + + string_to_sign = '' + for name, value in sorted_queries: + if value is not None: + string_to_sign += '\n' + name.lower() + ':' + unquote(value) + + return string_to_sign + + def _add_authorization_header(self, request, string_to_sign): + try: + signature = sign_string(self.account_key, string_to_sign) + auth_string = 'SharedKey ' + self.account_name + ':' + signature + request.http_request.headers['Authorization'] = auth_string + except Exception as ex: + # Wrap any error that occurred as signing error + # Doing so will clarify/locate the source of problem + raise _wrap_exception(ex, AzureSigningError) + + def on_request(self, request): + string_to_sign = \ + self._get_verb(request) + \ + self._get_headers( + request, + [ + 'content-encoding', 'content-language', 'content-length', + 'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' + ] + ) + \ + self._get_canonicalized_headers(request) + \ + self._get_canonicalized_resource(request) + \ + self._get_canonicalized_resource_query(request) + + self._add_authorization_header(request, string_to_sign) + #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/__init__.py new file mode 100644 index 0000000..5b396cd --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/__init__.py @@ -0,0 +1,5 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io.py
new file mode 100644
index 0000000..93a5c13
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io.py
@@ -0,0 +1,464 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+"""Input/output utilities.
+
+Includes:
+ - i/o-specific constants
+ - i/o-specific exceptions
+ - schema validation
+ - leaf value encoding and decoding
+ - datum reader/writer stuff (?)
+
+Also includes a generic representation for data, which uses the
+following mapping:
+ - Schema records are implemented as dict.
+ - Schema arrays are implemented as list.
+ - Schema maps are implemented as dict.
+ - Schema strings are implemented as unicode.
+ - Schema bytes are implemented as str.
+ - Schema ints are implemented as int.
+ - Schema longs are implemented as long.
+ - Schema floats are implemented as float.
+ - Schema doubles are implemented as float.
+ - Schema booleans are implemented as bool.
+"""
+
+import json
+import logging
+import struct
+import sys
+
+from ..avro import schema
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+STRUCT_FLOAT = struct.Struct('<f')  # little-endian float
+STRUCT_DOUBLE = struct.Struct('<d')  # little-endian double
+
+
+# ------------------------------------------------------------------------------
+# Exceptions
+
+
+class SchemaResolutionException(schema.AvroException):
+    def __init__(self, fail_msg, writer_schema=None):
+        pretty_writers = json.dumps(json.loads(str(writer_schema)), indent=2)
+        if writer_schema:
+            fail_msg += "\nWriter's Schema: %s" % pretty_writers
+        schema.AvroException.__init__(self, fail_msg)
+
+
+# ------------------------------------------------------------------------------
+# Decoder
+
+
+class BinaryDecoder(object):
+    """Read leaf values."""
+
+    def __init__(self, reader):
+        """
+        reader is a Python object on which we can call read, seek, and tell.
+        """
+        self._reader = reader
+
+    @property
+    def reader(self):
+        """Reports the reader used by this decoder."""
+        return self._reader
+
+    def read(self, n):
+        """Read n bytes.
+
+        Args:
+            n: Number of bytes to read.
+        Returns:
+            The next n bytes from the input.
+        """
+        assert (n >= 0), n
+        input_bytes = self.reader.read(n)
+        if n > 0 and not input_bytes:
+            raise StopIteration
+        assert (len(input_bytes) == n), input_bytes
+        return input_bytes
+
+    @staticmethod
+    def read_null():
+        """
+        null is written as zero bytes
+        """
+        return None
+
+    def read_boolean(self):
+        """
+        a boolean is written as a single byte
+        whose value is either 0 (false) or 1 (true).
+        """
+        b = ord(self.read(1))
+        if b == 1:
+            return True
+        if b == 0:
+            return False
+        fail_msg = "Invalid value for boolean: %s" % b
+        raise schema.AvroException(fail_msg)
+
+    def read_int(self):
+        """
+        int and long values are written using variable-length, zig-zag coding.
+        """
+        return self.read_long()
+
+    def read_long(self):
+        """
+        int and long values are written using variable-length, zig-zag coding.
+        """
+        b = ord(self.read(1))
+        n = b & 0x7F
+        shift = 7
+        while (b & 0x80) != 0:
+            b = ord(self.read(1))
+            n |= (b & 0x7F) << shift
+            shift += 7
+        datum = (n >> 1) ^ -(n & 1)
+        return datum
+
+    def read_float(self):
+        """
+        A float is written as 4 bytes.
+        The float is converted into a 32-bit integer using a method equivalent to
+        Java's floatToIntBits and then encoded in little-endian format.
+        """
+        return STRUCT_FLOAT.unpack(self.read(4))[0]
+
+    def read_double(self):
+        """
+        A double is written as 8 bytes.
+        The double is converted into a 64-bit integer using a method equivalent to
+        Java's doubleToLongBits and then encoded in little-endian format.
+        """
+        return STRUCT_DOUBLE.unpack(self.read(8))[0]
+
+    def read_bytes(self):
+        """
+        Bytes are encoded as a long followed by that many bytes of data.
+        """
+        nbytes = self.read_long()
+        assert (nbytes >= 0), nbytes
+        return self.read(nbytes)
+
+    def read_utf8(self):
+        """
+        A string is encoded as a long followed by
+        that many bytes of UTF-8 encoded character data.
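+        For example, the string 'abc' is encoded as the zig-zag varint
+        length 3 (the single byte 0x06) followed by the bytes 0x61 0x62 0x63.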
+ """ + input_bytes = self.read_bytes() + if PY3: + try: + return input_bytes.decode('utf-8') + except UnicodeDecodeError as exn: + logger.error('Invalid UTF-8 input bytes: %r', input_bytes) + raise exn + else: + # PY2 + return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable + + def skip_null(self): + pass + + def skip_boolean(self): + self.skip(1) + + def skip_int(self): + self.skip_long() + + def skip_long(self): + b = ord(self.read(1)) + while (b & 0x80) != 0: + b = ord(self.read(1)) + + def skip_float(self): + self.skip(4) + + def skip_double(self): + self.skip(8) + + def skip_bytes(self): + self.skip(self.read_long()) + + def skip_utf8(self): + self.skip_bytes() + + def skip(self, n): + self.reader.seek(self.reader.tell() + n) + + +# ------------------------------------------------------------------------------ +# DatumReader + + +class DatumReader(object): + """Deserialize Avro-encoded data into a Python data structure.""" + + def __init__(self, writer_schema=None): + """ + As defined in the Avro specification, we call the schema encoded + in the data the "writer's schema". + """ + self._writer_schema = writer_schema + + # read/write properties + def set_writer_schema(self, writer_schema): + self._writer_schema = writer_schema + + writer_schema = property(lambda self: self._writer_schema, + set_writer_schema) + + def read(self, decoder): + return self.read_data(self.writer_schema, decoder) + + def read_data(self, writer_schema, decoder): + # function dispatch for reading data based on type of writer's schema + if writer_schema.type == 'null': + result = decoder.read_null() + elif writer_schema.type == 'boolean': + result = decoder.read_boolean() + elif writer_schema.type == 'string': + result = decoder.read_utf8() + elif writer_schema.type == 'int': + result = decoder.read_int() + elif writer_schema.type == 'long': + result = decoder.read_long() + elif writer_schema.type == 'float': + result = decoder.read_float() + elif writer_schema.type == 'double': + result = decoder.read_double() + elif writer_schema.type == 'bytes': + result = decoder.read_bytes() + elif writer_schema.type == 'fixed': + result = self.read_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = self.read_enum(writer_schema, decoder) + elif writer_schema.type == 'array': + result = self.read_array(writer_schema, decoder) + elif writer_schema.type == 'map': + result = self.read_map(writer_schema, decoder) + elif writer_schema.type in ['union', 'error_union']: + result = self.read_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + result = self.read_record(writer_schema, decoder) + else: + fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + def skip_data(self, writer_schema, decoder): + if writer_schema.type == 'null': + result = decoder.skip_null() + elif writer_schema.type == 'boolean': + result = decoder.skip_boolean() + elif writer_schema.type == 'string': + result = decoder.skip_utf8() + elif writer_schema.type == 'int': + result = decoder.skip_int() + elif writer_schema.type == 'long': + result = decoder.skip_long() + elif writer_schema.type == 'float': + result = decoder.skip_float() + elif writer_schema.type == 'double': + result = decoder.skip_double() + elif writer_schema.type == 'bytes': + result = decoder.skip_bytes() + elif writer_schema.type == 'fixed': + result = self.skip_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + 
result = self.skip_enum(decoder) + elif writer_schema.type == 'array': + self.skip_array(writer_schema, decoder) + result = None + elif writer_schema.type == 'map': + self.skip_map(writer_schema, decoder) + result = None + elif writer_schema.type in ['union', 'error_union']: + result = self.skip_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + self.skip_record(writer_schema, decoder) + result = None + else: + fail_msg = "Unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + @staticmethod + def read_fixed(writer_schema, decoder): + """ + Fixed instances are encoded using the number of bytes declared + in the schema. + """ + return decoder.read(writer_schema.size) + + @staticmethod + def skip_fixed(writer_schema, decoder): + return decoder.skip(writer_schema.size) + + @staticmethod + def read_enum(writer_schema, decoder): + """ + An enum is encoded by a int, representing the zero-based position + of the symbol in the schema. + """ + # read data + index_of_symbol = decoder.read_int() + if index_of_symbol >= len(writer_schema.symbols): + fail_msg = "Can't access enum index %d for enum with %d symbols" \ + % (index_of_symbol, len(writer_schema.symbols)) + raise SchemaResolutionException(fail_msg, writer_schema) + read_symbol = writer_schema.symbols[index_of_symbol] + return read_symbol + + @staticmethod + def skip_enum(decoder): + return decoder.skip_int() + + def read_array(self, writer_schema, decoder): + """ + Arrays are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many array items. + A block with count zero indicates the end of the array. + Each item is encoded per the array's item schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. + """ + read_items = [] + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + decoder.read_long() + for _ in range(block_count): + read_items.append(self.read_data(writer_schema.items, decoder)) + block_count = decoder.read_long() + return read_items + + def skip_array(self, writer_schema, decoder): + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = decoder.read_long() + decoder.skip(block_size) + else: + for _ in range(block_count): + self.skip_data(writer_schema.items, decoder) + block_count = decoder.read_long() + + def read_map(self, writer_schema, decoder): + """ + Maps are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many key/value pairs. + A block with count zero indicates the end of the map. + Each item is encoded per the map's value schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. 
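+        For example, a one-entry map may arrive as the count 1, a single
+        key/value pair, and then a zero count terminating the map.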
+ """ + read_items = {} + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + decoder.read_long() + for _ in range(block_count): + key = decoder.read_utf8() + read_items[key] = self.read_data(writer_schema.values, decoder) + block_count = decoder.read_long() + return read_items + + def skip_map(self, writer_schema, decoder): + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = decoder.read_long() + decoder.skip(block_size) + else: + for _ in range(block_count): + decoder.skip_utf8() + self.skip_data(writer_schema.values, decoder) + block_count = decoder.read_long() + + def read_union(self, writer_schema, decoder): + """ + A union is encoded by first writing a long value indicating + the zero-based position within the union of the schema of its value. + The value is then encoded per the indicated schema within the union. + """ + # schema resolution + index_of_schema = int(decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + selected_writer_schema = writer_schema.schemas[index_of_schema] + + # read data + return self.read_data(selected_writer_schema, decoder) + + def skip_union(self, writer_schema, decoder): + index_of_schema = int(decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + return self.skip_data(writer_schema.schemas[index_of_schema], decoder) + + def read_record(self, writer_schema, decoder): + """ + A record is encoded by encoding the values of its fields + in the order that they are declared. In other words, a record + is encoded as just the concatenation of the encodings of its fields. + Field values are encoded per their schema. + + Schema Resolution: + * the ordering of fields may be different: fields are matched by name. + * schemas for fields with the same name in both records are resolved + recursively. + * if the writer's record contains a field with a name not present in the + reader's record, the writer's value for that field is ignored. + * if the reader's record schema has a field that contains a default value, + and writer's schema does not have a field with the same name, then the + reader should use the default value from its field. + * if the reader's record schema has a field with no default value, and + writer's schema does not have a field with the same name, then the + field's value is unset. 
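+        For example, a record with an int field and a string field is read
+        as one zig-zag varint followed by one length-prefixed string; no
+        field names or delimiters appear in the encoding.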
+ """ + # schema resolution + read_record = {} + for field in writer_schema.fields: + field_val = self.read_data(field.type, decoder) + read_record[field.name] = field_val + return read_record + + def skip_record(self, writer_schema, decoder): + for field in writer_schema.fields: + self.skip_data(field.type, decoder) + + +# ------------------------------------------------------------------------------ + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io_async.py new file mode 100644 index 0000000..e981216 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/avro_io_async.py @@ -0,0 +1,448 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Input/output utilities. + +Includes: + - i/o-specific constants + - i/o-specific exceptions + - schema validation + - leaf value encoding and decoding + - datum reader/writer stuff (?) + +Also includes a generic representation for data, which uses the +following mapping: + - Schema records are implemented as dict. + - Schema arrays are implemented as list. + - Schema maps are implemented as dict. + - Schema strings are implemented as unicode. + - Schema bytes are implemented as str. + - Schema ints are implemented as int. + - Schema longs are implemented as long. + - Schema floats are implemented as float. + - Schema doubles are implemented as float. + - Schema booleans are implemented as bool. +""" + +import logging +import sys + +from ..avro import schema + +from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Decoder + + +class AsyncBinaryDecoder(object): + """Read leaf values.""" + + def __init__(self, reader): + """ + reader is a Python object on which we can call read, seek, and tell. + """ + self._reader = reader + + @property + def reader(self): + """Reports the reader used by this decoder.""" + return self._reader + + async def read(self, n): + """Read n bytes. + + Args: + n: Number of bytes to read. + Returns: + The next n bytes from the input. + """ + assert (n >= 0), n + input_bytes = await self.reader.read(n) + if n > 0 and not input_bytes: + raise StopAsyncIteration + assert (len(input_bytes) == n), input_bytes + return input_bytes + + @staticmethod + def read_null(): + """ + null is written as zero bytes + """ + return None + + async def read_boolean(self): + """ + a boolean is written as a single byte + whose value is either 0 (false) or 1 (true). + """ + b = ord(await self.read(1)) + if b == 1: + return True + if b == 0: + return False + fail_msg = "Invalid value for boolean: %s" % b + raise schema.AvroException(fail_msg) + + async def read_int(self): + """ + int and long values are written using variable-length, zig-zag coding. + """ + return await self.read_long() + + async def read_long(self): + """ + int and long values are written using variable-length, zig-zag coding. 
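+        Zig-zag coding maps 0 to 0, -1 to 1, 1 to 2, -2 to 3, and so on,
+        so values of small magnitude occupy fewer bytes.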
+ """ + b = ord(await self.read(1)) + n = b & 0x7F + shift = 7 + while (b & 0x80) != 0: + b = ord(await self.read(1)) + n |= (b & 0x7F) << shift + shift += 7 + datum = (n >> 1) ^ -(n & 1) + return datum + + async def read_float(self): + """ + A float is written as 4 bytes. + The float is converted into a 32-bit integer using a method equivalent to + Java's floatToIntBits and then encoded in little-endian format. + """ + return STRUCT_FLOAT.unpack(await self.read(4))[0] + + async def read_double(self): + """ + A double is written as 8 bytes. + The double is converted into a 64-bit integer using a method equivalent to + Java's doubleToLongBits and then encoded in little-endian format. + """ + return STRUCT_DOUBLE.unpack(await self.read(8))[0] + + async def read_bytes(self): + """ + Bytes are encoded as a long followed by that many bytes of data. + """ + nbytes = await self.read_long() + assert (nbytes >= 0), nbytes + return await self.read(nbytes) + + async def read_utf8(self): + """ + A string is encoded as a long followed by + that many bytes of UTF-8 encoded character data. + """ + input_bytes = await self.read_bytes() + if PY3: + try: + return input_bytes.decode('utf-8') + except UnicodeDecodeError as exn: + logger.error('Invalid UTF-8 input bytes: %r', input_bytes) + raise exn + else: + # PY2 + return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable + + def skip_null(self): + pass + + async def skip_boolean(self): + await self.skip(1) + + async def skip_int(self): + await self.skip_long() + + async def skip_long(self): + b = ord(await self.read(1)) + while (b & 0x80) != 0: + b = ord(await self.read(1)) + + async def skip_float(self): + await self.skip(4) + + async def skip_double(self): + await self.skip(8) + + async def skip_bytes(self): + await self.skip(await self.read_long()) + + async def skip_utf8(self): + await self.skip_bytes() + + async def skip(self, n): + await self.reader.seek(await self.reader.tell() + n) + + +# ------------------------------------------------------------------------------ +# DatumReader + + +class AsyncDatumReader(object): + """Deserialize Avro-encoded data into a Python data structure.""" + + def __init__(self, writer_schema=None): + """ + As defined in the Avro specification, we call the schema encoded + in the data the "writer's schema", and the schema expected by the + reader the "reader's schema". 
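+        Note that this reader tracks only the writer's schema; it does not
+        perform reader/writer schema resolution.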
+ """ + self._writer_schema = writer_schema + + # read/write properties + def set_writer_schema(self, writer_schema): + self._writer_schema = writer_schema + + writer_schema = property(lambda self: self._writer_schema, + set_writer_schema) + + async def read(self, decoder): + return await self.read_data(self.writer_schema, decoder) + + async def read_data(self, writer_schema, decoder): + # function dispatch for reading data based on type of writer's schema + if writer_schema.type == 'null': + result = decoder.read_null() + elif writer_schema.type == 'boolean': + result = await decoder.read_boolean() + elif writer_schema.type == 'string': + result = await decoder.read_utf8() + elif writer_schema.type == 'int': + result = await decoder.read_int() + elif writer_schema.type == 'long': + result = await decoder.read_long() + elif writer_schema.type == 'float': + result = await decoder.read_float() + elif writer_schema.type == 'double': + result = await decoder.read_double() + elif writer_schema.type == 'bytes': + result = await decoder.read_bytes() + elif writer_schema.type == 'fixed': + result = await self.read_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = await self.read_enum(writer_schema, decoder) + elif writer_schema.type == 'array': + result = await self.read_array(writer_schema, decoder) + elif writer_schema.type == 'map': + result = await self.read_map(writer_schema, decoder) + elif writer_schema.type in ['union', 'error_union']: + result = await self.read_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + result = await self.read_record(writer_schema, decoder) + else: + fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + async def skip_data(self, writer_schema, decoder): + if writer_schema.type == 'null': + result = decoder.skip_null() + elif writer_schema.type == 'boolean': + result = await decoder.skip_boolean() + elif writer_schema.type == 'string': + result = await decoder.skip_utf8() + elif writer_schema.type == 'int': + result = await decoder.skip_int() + elif writer_schema.type == 'long': + result = await decoder.skip_long() + elif writer_schema.type == 'float': + result = await decoder.skip_float() + elif writer_schema.type == 'double': + result = await decoder.skip_double() + elif writer_schema.type == 'bytes': + result = await decoder.skip_bytes() + elif writer_schema.type == 'fixed': + result = await self.skip_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = await self.skip_enum(decoder) + elif writer_schema.type == 'array': + await self.skip_array(writer_schema, decoder) + result = None + elif writer_schema.type == 'map': + await self.skip_map(writer_schema, decoder) + result = None + elif writer_schema.type in ['union', 'error_union']: + result = await self.skip_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + await self.skip_record(writer_schema, decoder) + result = None + else: + fail_msg = "Unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + @staticmethod + async def read_fixed(writer_schema, decoder): + """ + Fixed instances are encoded using the number of bytes declared + in the schema. 
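+        For example, a fixed of size 16 always occupies exactly 16 bytes,
+        with no length prefix.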
+ """ + return await decoder.read(writer_schema.size) + + @staticmethod + async def skip_fixed(writer_schema, decoder): + return await decoder.skip(writer_schema.size) + + @staticmethod + async def read_enum(writer_schema, decoder): + """ + An enum is encoded by a int, representing the zero-based position + of the symbol in the schema. + """ + # read data + index_of_symbol = await decoder.read_int() + if index_of_symbol >= len(writer_schema.symbols): + fail_msg = "Can't access enum index %d for enum with %d symbols" \ + % (index_of_symbol, len(writer_schema.symbols)) + raise SchemaResolutionException(fail_msg, writer_schema) + read_symbol = writer_schema.symbols[index_of_symbol] + return read_symbol + + @staticmethod + async def skip_enum(decoder): + return await decoder.skip_int() + + async def read_array(self, writer_schema, decoder): + """ + Arrays are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many array items. + A block with count zero indicates the end of the array. + Each item is encoded per the array's item schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. + """ + read_items = [] + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + await decoder.read_long() + for _ in range(block_count): + read_items.append(await self.read_data(writer_schema.items, decoder)) + block_count = await decoder.read_long() + return read_items + + async def skip_array(self, writer_schema, decoder): + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = await decoder.read_long() + await decoder.skip(block_size) + else: + for _ in range(block_count): + await self.skip_data(writer_schema.items, decoder) + block_count = await decoder.read_long() + + async def read_map(self, writer_schema, decoder): + """ + Maps are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many key/value pairs. + A block with count zero indicates the end of the map. + Each item is encoded per the map's value schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. + """ + read_items = {} + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + await decoder.read_long() + for _ in range(block_count): + key = await decoder.read_utf8() + read_items[key] = await self.read_data(writer_schema.values, decoder) + block_count = await decoder.read_long() + return read_items + + async def skip_map(self, writer_schema, decoder): + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = await decoder.read_long() + await decoder.skip(block_size) + else: + for _ in range(block_count): + await decoder.skip_utf8() + await self.skip_data(writer_schema.values, decoder) + block_count = await decoder.read_long() + + async def read_union(self, writer_schema, decoder): + """ + A union is encoded by first writing a long value indicating + the zero-based position within the union of the schema of its value. + The value is then encoded per the indicated schema within the union. 
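+        For example, against the union ["null", "string"], a null value is
+        encoded as just the branch index 0, while a string is the index 1
+        followed by the string's own encoding.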
+ """ + # schema resolution + index_of_schema = int(await decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + selected_writer_schema = writer_schema.schemas[index_of_schema] + + # read data + return await self.read_data(selected_writer_schema, decoder) + + async def skip_union(self, writer_schema, decoder): + index_of_schema = int(await decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + return await self.skip_data(writer_schema.schemas[index_of_schema], decoder) + + async def read_record(self, writer_schema, decoder): + """ + A record is encoded by encoding the values of its fields + in the order that they are declared. In other words, a record + is encoded as just the concatenation of the encodings of its fields. + Field values are encoded per their schema. + + Schema Resolution: + * the ordering of fields may be different: fields are matched by name. + * schemas for fields with the same name in both records are resolved + recursively. + * if the writer's record contains a field with a name not present in the + reader's record, the writer's value for that field is ignored. + * if the reader's record schema has a field that contains a default value, + and writer's schema does not have a field with the same name, then the + reader should use the default value from its field. + * if the reader's record schema has a field with no default value, and + writer's schema does not have a field with the same name, then the + field's value is unset. + """ + # schema resolution + read_record = {} + for field in writer_schema.fields: + field_val = await self.read_data(field.type, decoder) + read_record[field.name] = field_val + return read_record + + async def skip_record(self, writer_schema, decoder): + for field in writer_schema.fields: + await self.skip_data(field.type, decoder) + + +# ------------------------------------------------------------------------------ + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile.py new file mode 100644 index 0000000..df06fe0 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile.py @@ -0,0 +1,266 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +"""Read/Write Avro File Object Containers.""" + +import io +import logging +import sys +import zlib + +from ..avro import avro_io +from ..avro import schema + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Version of the container file: +VERSION = 1 + +if PY3: + MAGIC = b'Obj' + bytes([VERSION]) + MAGIC_SIZE = len(MAGIC) +else: + MAGIC = 'Obj' + chr(VERSION) + MAGIC_SIZE = len(MAGIC) + +# Size of the synchronization marker, in number of bytes: +SYNC_SIZE = 16 + +# Schema of the container header: +META_SCHEMA = schema.parse(""" +{ + "type": "record", "name": "org.apache.avro.file.Header", + "fields": [{ + "name": "magic", + "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d} + }, { + "name": "meta", + "type": {"type": "map", "values": "bytes"} + }, { + "name": "sync", + "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d} + }] +} +""" % { + 'magic_size': MAGIC_SIZE, + 'sync_size': SYNC_SIZE, +}) + +# Codecs supported by container files: +VALID_CODECS = frozenset(['null', 'deflate']) + +# Metadata key associated to the schema: +SCHEMA_KEY = "avro.schema" + + +# ------------------------------------------------------------------------------ +# Exceptions + + +class DataFileException(schema.AvroException): + """Problem reading or writing file object containers.""" + +# ------------------------------------------------------------------------------ + + +class DataFileReader(object): # pylint: disable=too-many-instance-attributes + """Read files written by DataFileWriter.""" + + def __init__(self, reader, datum_reader, **kwargs): + """Initializes a new data file reader. + + Args: + reader: Open file to read from. + datum_reader: Avro datum reader. + """ + self._reader = reader + self._raw_decoder = avro_io.BinaryDecoder(reader) + self._header_reader = kwargs.pop('header_reader', None) + self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader) + self._datum_decoder = None # Maybe reset at every block. + self._datum_reader = datum_reader + + # In case self._reader only has partial content(without header). + # seek(0, 0) to make sure read the (partial)content from beginning. + self._reader.seek(0, 0) + + # read the header: magic, meta, sync + self._read_header() + + # ensure codec is valid + avro_codec_raw = self.get_meta('avro.codec') + if avro_codec_raw is None: + self.codec = "null" + else: + self.codec = avro_codec_raw.decode('utf-8') + if self.codec not in VALID_CODECS: + raise DataFileException('Unknown codec: %s.' % self.codec) + + # get ready to read + self._block_count = 0 + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro. + if hasattr(self._reader, 'object_position'): + self.reader.track_object_position() + + self._cur_object_index = 0 + # header_reader indicates reader only has partial content. The reader doesn't have block header, + # so we read use the block count stored last time. + # Also ChangeFeed only has codec==null, so use _raw_decoder is good. 
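+        # (No decompression is possible on this path in any case, since
+        # _raw_decoder reads the underlying stream as-is.)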
+ if self._header_reader is not None: + self._datum_decoder = self._raw_decoder + + self.datum_reader.writer_schema = ( + schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) + + def __enter__(self): + return self + + def __exit__(self, data_type, value, traceback): + # Perform a close if there's no exception + if data_type is None: + self.close() + + def __iter__(self): + return self + + # read-only properties + @property + def reader(self): + return self._reader + + @property + def raw_decoder(self): + return self._raw_decoder + + @property + def datum_decoder(self): + return self._datum_decoder + + @property + def datum_reader(self): + return self._datum_reader + + @property + def sync_marker(self): + return self._sync_marker + + @property + def meta(self): + return self._meta + + # read/write properties + @property + def block_count(self): + return self._block_count + + def get_meta(self, key): + """Reports the value of a given metadata key. + + Args: + key: Metadata key (string) to report the value of. + Returns: + Value associated to the metadata key, as bytes. + """ + return self._meta.get(key) + + def _read_header(self): + header_reader = self._header_reader if self._header_reader else self._reader + header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder + + # seek to the beginning of the file to get magic block + header_reader.seek(0, 0) + + # read header into a dict + header = self.datum_reader.read_data(META_SCHEMA, header_decoder) + + # check magic number + if header.get('magic') != MAGIC: + fail_msg = "Not an Avro data file: %s doesn't match %s." \ + % (header.get('magic'), MAGIC) + raise schema.AvroException(fail_msg) + + # set metadata + self._meta = header['meta'] + + # set sync marker + self._sync_marker = header['sync'] + + def _read_block_header(self): + self._block_count = self.raw_decoder.read_long() + if self.codec == "null": + # Skip a long; we don't need to use the length. + self.raw_decoder.skip_long() + self._datum_decoder = self._raw_decoder + elif self.codec == 'deflate': + # Compressed data is stored as (length, data), which + # corresponds to how the "bytes" type is encoded. + data = self.raw_decoder.read_bytes() + # -15 is the log of the window size; negative indicates + # "raw" (no zlib headers) decompression. See zlib.h. + uncompressed = zlib.decompress(data, -15) + self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed)) + else: + raise DataFileException("Unknown codec: %r" % self.codec) + + def _skip_sync(self): + """ + Read the length of the sync marker; if it matches the sync marker, + return True. Otherwise, seek back to where we started and return False. + """ + proposed_sync_marker = self.reader.read(SYNC_SIZE) + if SYNC_SIZE > 0 and not proposed_sync_marker: + raise StopIteration + if proposed_sync_marker != self.sync_marker: + self.reader.seek(-SYNC_SIZE, 1) + + def __next__(self): + """Return the next datum in the file.""" + if self.block_count == 0: + self._skip_sync() + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro file with this attr. 
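+        # (Readers without an object_position attribute skip this bookkeeping.)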
+ if hasattr(self._reader, 'object_position'): + self.reader.track_object_position() + self._cur_object_index = 0 + + self._read_block_header() + + datum = self.datum_reader.read(self.datum_decoder) + self._block_count -= 1 + self._cur_object_index += 1 + + # object_position is to support reading from current position in the future read, + # This will track the index of the next item to be read. + # This will also track the offset before the next sync marker. + if hasattr(self._reader, 'object_position'): + if self.block_count == 0: + # the next event to be read is at index 0 in the new chunk of blocks, + self.reader.track_object_position() + self.reader.set_object_index(0) + else: + self.reader.set_object_index(self._cur_object_index) + + return datum + + # PY2 + def next(self): + return self.__next__() + + def close(self): + """Close this reader.""" + self.reader.close() + + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile_async.py new file mode 100644 index 0000000..1e9d018 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/datafile_async.py @@ -0,0 +1,215 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Read/Write Avro File Object Containers.""" + +import logging +import sys + +from ..avro import avro_io_async +from ..avro import schema +from .datafile import DataFileException +from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY + + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Codecs supported by container files: +VALID_CODECS = frozenset(['null']) + + +class AsyncDataFileReader(object): # pylint: disable=too-many-instance-attributes + """Read files written by DataFileWriter.""" + + def __init__(self, reader, datum_reader, **kwargs): + """Initializes a new data file reader. + + Args: + reader: Open file to read from. + datum_reader: Avro datum reader. + """ + self._reader = reader + self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader) + self._header_reader = kwargs.pop('header_reader', None) + self._header_decoder = None if self._header_reader is None else \ + avro_io_async.AsyncBinaryDecoder(self._header_reader) + self._datum_decoder = None # Maybe reset at every block. + self._datum_reader = datum_reader + self.codec = "null" + self._block_count = 0 + self._cur_object_index = 0 + self._meta = None + self._sync_marker = None + + async def init(self): + # In case self._reader only has partial content(without header). + # seek(0, 0) to make sure read the (partial)content from beginning. + await self._reader.seek(0, 0) + + # read the header: magic, meta, sync + await self._read_header() + + # ensure codec is valid + avro_codec_raw = self.get_meta('avro.codec') + if avro_codec_raw is None: + self.codec = "null" + else: + self.codec = avro_codec_raw.decode('utf-8') + if self.codec not in VALID_CODECS: + raise DataFileException('Unknown codec: %s.' 
% self.codec) + + # get ready to read + self._block_count = 0 + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro. + if hasattr(self._reader, 'object_position'): + self.reader.track_object_position() + + # header_reader indicates reader only has partial content. The reader doesn't have block header, + # so we read use the block count stored last time. + # Also ChangeFeed only has codec==null, so use _raw_decoder is good. + if self._header_reader is not None: + self._datum_decoder = self._raw_decoder + self.datum_reader.writer_schema = ( + schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) + return self + + async def __aenter__(self): + return self + + async def __aexit__(self, data_type, value, traceback): + # Perform a close if there's no exception + if data_type is None: + self.close() + + def __aiter__(self): + return self + + # read-only properties + @property + def reader(self): + return self._reader + + @property + def raw_decoder(self): + return self._raw_decoder + + @property + def datum_decoder(self): + return self._datum_decoder + + @property + def datum_reader(self): + return self._datum_reader + + @property + def sync_marker(self): + return self._sync_marker + + @property + def meta(self): + return self._meta + + # read/write properties + @property + def block_count(self): + return self._block_count + + def get_meta(self, key): + """Reports the value of a given metadata key. + + Args: + key: Metadata key (string) to report the value of. + Returns: + Value associated to the metadata key, as bytes. + """ + return self._meta.get(key) + + async def _read_header(self): + header_reader = self._header_reader if self._header_reader else self._reader + header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder + + # seek to the beginning of the file to get magic block + await header_reader.seek(0, 0) + + # read header into a dict + header = await self.datum_reader.read_data(META_SCHEMA, header_decoder) + + # check magic number + if header.get('magic') != MAGIC: + fail_msg = "Not an Avro data file: %s doesn't match %s." \ + % (header.get('magic'), MAGIC) + raise schema.AvroException(fail_msg) + + # set metadata + self._meta = header['meta'] + + # set sync marker + self._sync_marker = header['sync'] + + async def _read_block_header(self): + self._block_count = await self.raw_decoder.read_long() + if self.codec == "null": + # Skip a long; we don't need to use the length. + await self.raw_decoder.skip_long() + self._datum_decoder = self._raw_decoder + else: + raise DataFileException("Unknown codec: %r" % self.codec) + + async def _skip_sync(self): + """ + Read the length of the sync marker; if it matches the sync marker, + return True. Otherwise, seek back to where we started and return False. + """ + proposed_sync_marker = await self.reader.read(SYNC_SIZE) + if SYNC_SIZE > 0 and not proposed_sync_marker: + raise StopAsyncIteration + if proposed_sync_marker != self.sync_marker: + await self.reader.seek(-SYNC_SIZE, 1) + + async def __anext__(self): + """Return the next datum in the file.""" + if self.block_count == 0: + await self._skip_sync() + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro file with this attr. 
+ if hasattr(self._reader, 'object_position'): + await self.reader.track_object_position() + self._cur_object_index = 0 + + await self._read_block_header() + + datum = await self.datum_reader.read(self.datum_decoder) + self._block_count -= 1 + self._cur_object_index += 1 + + # object_position is to support reading from current position in the future read, + # This will track the index of the next item to be read. + # This will also track the offset before the next sync marker. + if hasattr(self._reader, 'object_position'): + if self.block_count == 0: + # the next event to be read is at index 0 in the new chunk of blocks, + await self.reader.track_object_position() + await self.reader.set_object_index(0) + else: + await self.reader.set_object_index(self._cur_object_index) + + return datum + + def close(self): + """Close this reader.""" + self.reader.close() + + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/schema.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/schema.py new file mode 100644 index 0000000..ffe2853 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/avro/schema.py @@ -0,0 +1,1221 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines + +"""Representation of Avro schemas. + +A schema may be one of: + - A record, mapping field names to field value data; + - An error, equivalent to a record; + - An enum, containing one of a small set of symbols; + - An array of values, all of the same schema; + - A map containing string/value pairs, each of a declared schema; + - A union of other schemas; + - A fixed sized binary object; + - A unicode string; + - A sequence of bytes; + - A 32-bit signed int; + - A 64-bit signed long; + - A 32-bit floating-point float; + - A 64-bit floating-point double; + - A boolean; + - Null. +""" + +import abc +import json +import logging +import re +import sys +from six import with_metaclass + +PY2 = sys.version_info[0] == 2 + +if PY2: + _str = unicode # pylint: disable=undefined-variable +else: + _str = str + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Log level more verbose than DEBUG=10, INFO=20, etc. 
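+# Used as logger.log(DEBUG_VERBOSE, ...) for very chatty messages such as
+# schema-name registration.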
+DEBUG_VERBOSE = 5 + +NULL = 'null' +BOOLEAN = 'boolean' +STRING = 'string' +BYTES = 'bytes' +INT = 'int' +LONG = 'long' +FLOAT = 'float' +DOUBLE = 'double' +FIXED = 'fixed' +ENUM = 'enum' +RECORD = 'record' +ERROR = 'error' +ARRAY = 'array' +MAP = 'map' +UNION = 'union' + +# Request and error unions are part of Avro protocols: +REQUEST = 'request' +ERROR_UNION = 'error_union' + +PRIMITIVE_TYPES = frozenset([ + NULL, + BOOLEAN, + STRING, + BYTES, + INT, + LONG, + FLOAT, + DOUBLE, +]) + +NAMED_TYPES = frozenset([ + FIXED, + ENUM, + RECORD, + ERROR, +]) + +VALID_TYPES = frozenset.union( + PRIMITIVE_TYPES, + NAMED_TYPES, + [ + ARRAY, + MAP, + UNION, + REQUEST, + ERROR_UNION, + ], +) + +SCHEMA_RESERVED_PROPS = frozenset([ + 'type', + 'name', + 'namespace', + 'fields', # Record + 'items', # Array + 'size', # Fixed + 'symbols', # Enum + 'values', # Map + 'doc', +]) + +FIELD_RESERVED_PROPS = frozenset([ + 'default', + 'name', + 'doc', + 'order', + 'type', +]) + +VALID_FIELD_SORT_ORDERS = frozenset([ + 'ascending', + 'descending', + 'ignore', +]) + + +# ------------------------------------------------------------------------------ +# Exceptions + + +class Error(Exception): + """Base class for errors in this module.""" + + +class AvroException(Error): + """Generic Avro schema error.""" + + +class SchemaParseException(AvroException): + """Error while parsing a JSON schema descriptor.""" + + +class Schema(with_metaclass(abc.ABCMeta, object)): + """Abstract base class for all Schema classes.""" + + def __init__(self, data_type, other_props=None): + """Initializes a new schema object. + + Args: + data_type: Type of the schema to initialize. + other_props: Optional dictionary of additional properties. + """ + if data_type not in VALID_TYPES: + raise SchemaParseException('%r is not a valid Avro type.' % data_type) + + # All properties of this schema, as a map: property name -> property value + self._props = {} + + self._props['type'] = data_type + self._type = data_type + + if other_props: + self._props.update(other_props) + + @property + def namespace(self): + """Returns: the namespace this schema belongs to, if any, or None.""" + return self._props.get('namespace', None) + + @property + def type(self): + """Returns: the type of this schema.""" + return self._type + + @property + def doc(self): + """Returns: the documentation associated to this schema, if any, or None.""" + return self._props.get('doc', None) + + @property + def props(self): + """Reports all the properties of this schema. + + Includes all properties, reserved and non reserved. + JSON properties of this schema are directly generated from this dict. + + Returns: + A dictionary of properties associated to this schema. + """ + return self._props + + @property + def other_props(self): + """Returns: the dictionary of non-reserved properties.""" + return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS)) + + def __str__(self): + """Returns: the JSON representation of this schema.""" + return json.dumps(self.to_json(names=None)) + + @abc.abstractmethod + def to_json(self, names): + """Converts the schema object into its AVRO specification representation. + + Schema types that have names (records, enums, and fixed) must + be aware of not re-defining schemas that are already listed + in the parameter names. 
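+        Args:
+            names: Tracker of named schemas already serialized, used to
+                avoid emitting a definition for the same name twice.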
+ """ + raise Exception('Cannot run abstract method.') + + +# ------------------------------------------------------------------------------ + + +_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*') + +_RE_FULL_NAME = re.compile( + r'^' + r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace + r'([A-Za-z_][A-Za-z0-9_]*)' # name + r'$' +) + + +class Name(object): + """Representation of an Avro name.""" + + def __init__(self, name, namespace=None): + """Parses an Avro name. + + Args: + name: Avro name to parse (relative or absolute). + namespace: Optional explicit namespace if the name is relative. + """ + # Normalize: namespace is always defined as a string, possibly empty. + if namespace is None: + namespace = '' + + if '.' in name: + # name is absolute, namespace is ignored: + self._fullname = name + + match = _RE_FULL_NAME.match(self._fullname) + if match is None: + raise SchemaParseException( + 'Invalid absolute schema name: %r.' % self._fullname) + + self._name = match.group(1) + self._namespace = self._fullname[:-(len(self._name) + 1)] + + else: + # name is relative, combine with explicit namespace: + self._name = name + self._namespace = namespace + self._fullname = (self._name + if (not self._namespace) else + '%s.%s' % (self._namespace, self._name)) + + # Validate the fullname: + if _RE_FULL_NAME.match(self._fullname) is None: + raise SchemaParseException( + 'Invalid schema name %r infered from name %r and namespace %r.' + % (self._fullname, self._name, self._namespace)) + + def __eq__(self, other): + if not isinstance(other, Name): + return NotImplemented + return self.fullname == other.fullname + + @property + def simple_name(self): + """Returns: the simple name part of this name.""" + return self._name + + @property + def namespace(self): + """Returns: this name's namespace, possible the empty string.""" + return self._namespace + + @property + def fullname(self): + """Returns: the full name.""" + return self._fullname + + +# ------------------------------------------------------------------------------ + + +class Names(object): + """Tracks Avro named schemas and default namespace during parsing.""" + + def __init__(self, default_namespace=None, names=None): + """Initializes a new name tracker. + + Args: + default_namespace: Optional default namespace. + names: Optional initial mapping of known named schemas. + """ + if names is None: + names = {} + self._names = names + self._default_namespace = default_namespace + + @property + def names(self): + """Returns: the mapping of known named schemas.""" + return self._names + + @property + def default_namespace(self): + """Returns: the default namespace, if any, or None.""" + return self._default_namespace + + def new_with_default_namespace(self, namespace): + """Creates a new name tracker from this tracker, but with a new default ns. + + Args: + namespace: New default namespace to use. + Returns: + New name tracker with the specified default namespace. + """ + return Names(names=self._names, default_namespace=namespace) + + def get_name(self, name, namespace=None): + """Resolves the Avro name according to this name tracker's state. + + Args: + name: Name to resolve (absolute or relative). + namespace: Optional explicit namespace. + Returns: + The specified name, resolved according to this tracker. + """ + if namespace is None: + namespace = self._default_namespace + return Name(name=name, namespace=namespace) + + def get_schema(self, name, namespace=None): + """Resolves an Avro schema by name. 
+
+        Args:
+            name: Name (relative or absolute) of the Avro schema to look up.
+            namespace: Optional explicit namespace.
+        Returns:
+            The schema with the specified name, if any, or None.
+        """
+        avro_name = self.get_name(name=name, namespace=namespace)
+        return self._names.get(avro_name.fullname, None)
+
+    def prune_namespace(self, properties):
+        """Returns the given properties with the namespace removed,
+        if it matches this tracker's default namespace.
+        """
+        if self.default_namespace is None:
+            # There is no default namespace: nothing to prune.
+            return properties
+        if 'namespace' not in properties:
+            # The properties declare no namespace: nothing to prune.
+            return properties
+        if properties['namespace'] != self.default_namespace:
+            # The namespaces differ: leave the properties unchanged.
+            return properties
+        # The namespace is redundant with the default: remove it.
+        prunable = properties.copy()
+        del prunable['namespace']
+        return prunable
+
+    def register(self, schema):
+        """Registers a new named schema in this tracker.
+
+        Args:
+            schema: Named Avro schema to register in this tracker.
+        """
+        if schema.fullname in VALID_TYPES:
+            raise SchemaParseException(
+                '%s is a reserved type name.' % schema.fullname)
+        if schema.fullname in self.names:
+            raise SchemaParseException(
+                'Avro name %r already exists.' % schema.fullname)
+
+        logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname)
+        self._names[schema.fullname] = schema
+
+
+# ------------------------------------------------------------------------------
+
+
+class NamedSchema(Schema):
+    """Abstract base class for named schemas.
+
+    Named schemas are enumerated in NAMED_TYPES.
+    """
+
+    def __init__(
+            self,
+            data_type,
+            name=None,
+            namespace=None,
+            names=None,
+            other_props=None,
+    ):
+        """Initializes a new named schema object.
+
+        Args:
+            data_type: Type of the named schema.
+            name: Name (absolute or relative) of the schema.
+            namespace: Optional explicit namespace if name is relative.
+            names: Tracker to resolve and register Avro names.
+            other_props: Optional map of additional properties of the schema.
+        """
+        assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type)
+        self._avro_name = names.get_name(name=name, namespace=namespace)
+
+        super(NamedSchema, self).__init__(data_type, other_props)
+
+        names.register(self)
+
+        self._props['name'] = self.name
+        if self.namespace:
+            self._props['namespace'] = self.namespace
+
+    @property
+    def avro_name(self):
+        """Returns: the Name object describing this schema's name."""
+        return self._avro_name
+
+    @property
+    def name(self):
+        return self._avro_name.simple_name
+
+    @property
+    def namespace(self):
+        return self._avro_name.namespace
+
+    @property
+    def fullname(self):
+        return self._avro_name.fullname
+
+    def name_ref(self, names):
+        """Reports this schema name relative to the specified name tracker.
+
+        Args:
+            names: Avro name tracker to relativise this schema name against.
+        Returns:
+            This schema name, relativised against the specified name tracker.
+        """
+        if self.namespace == names.default_namespace:
+            return self.name
+        return self.fullname
+
+    @abc.abstractmethod
+    def to_json(self, names):
+        """Converts the schema object into its AVRO specification representation.
+
+        Schema types that have names (records, enums, and fixed) must
+        be aware of not re-defining schemas that are already listed
+        in the parameter names.
+ """ + raise Exception('Cannot run abstract method.') + +# ------------------------------------------------------------------------------ + + +_NO_DEFAULT = object() + + +class Field(object): + """Representation of the schema of a field in a record.""" + + def __init__( + self, + data_type, + name, + index, + has_default, + default=_NO_DEFAULT, + order=None, + doc=None, + other_props=None + ): + """Initializes a new Field object. + + Args: + data_type: Avro schema of the field. + name: Name of the field. + index: 0-based position of the field. + has_default: + default: + order: + doc: + other_props: + """ + if (not isinstance(name, _str)) or (not name): + raise SchemaParseException('Invalid record field name: %r.' % name) + if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS): + raise SchemaParseException('Invalid record field order: %r.' % order) + + # All properties of this record field: + self._props = {} + + self._has_default = has_default + if other_props: + self._props.update(other_props) + + self._index = index + self._type = self._props['type'] = data_type + self._name = self._props['name'] = name + + if has_default: + self._props['default'] = default + + if order is not None: + self._props['order'] = order + + if doc is not None: + self._props['doc'] = doc + + @property + def type(self): + """Returns: the schema of this field.""" + return self._type + + @property + def name(self): + """Returns: this field name.""" + return self._name + + @property + def index(self): + """Returns: the 0-based index of this field in the record.""" + return self._index + + @property + def default(self): + return self._props['default'] + + @property + def has_default(self): + return self._has_default + + @property + def order(self): + return self._props.get('order', None) + + @property + def doc(self): + return self._props.get('doc', None) + + @property + def props(self): + return self._props + + @property + def other_props(self): + return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS) + + def __str__(self): + return json.dumps(self.to_json()) + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + to_dump['type'] = self.type.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ +# Primitive Types + + +class PrimitiveSchema(Schema): + """Schema of a primitive Avro type. + + Valid primitive types are defined in PRIMITIVE_TYPES. + """ + + def __init__(self, data_type, other_props=None): + """Initializes a new schema object for the specified primitive type. + + Args: + data_type: Type of the schema to construct. Must be primitive. + """ + if data_type not in PRIMITIVE_TYPES: + raise AvroException('%r is not a valid primitive type.' % data_type) + super(PrimitiveSchema, self).__init__(data_type, other_props=other_props) + + @property + def name(self): + """Returns: the simple name of this schema.""" + # The name of a primitive type is the type itself. + return self.type + + @property + def fullname(self): + """Returns: the fully qualified name of this schema.""" + # The full name is the simple name for primitive schema. 
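+        # For example (illustrative): PrimitiveSchema('int').fullname == 'int'.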
+ return self.name + + def to_json(self, names=None): + if len(self.props) == 1: + return self.fullname + return self.props + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ +# Complex Types (non-recursive) + + +class FixedSchema(NamedSchema): + def __init__( + self, + name, + namespace, + size, + names=None, + other_props=None, + ): + # Ensure valid ctor args + if not isinstance(size, int): + fail_msg = 'Fixed Schema requires a valid integer for size property.' + raise AvroException(fail_msg) + + super(FixedSchema, self).__init__( + data_type=FIXED, + name=name, + namespace=namespace, + names=names, + other_props=other_props, + ) + self._props['size'] = size + + @property + def size(self): + """Returns: the size of this fixed schema, in bytes.""" + return self._props['size'] + + def to_json(self, names=None): + if names is None: + names = Names() + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + return names.prune_namespace(self.props) + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ + + +class EnumSchema(NamedSchema): + def __init__( + self, + name, + namespace, + symbols, + names=None, + doc=None, + other_props=None, + ): + """Initializes a new enumeration schema object. + + Args: + name: Simple name of this enumeration. + namespace: Optional namespace. + symbols: Ordered list of symbols defined in this enumeration. + names: + doc: + other_props: + """ + symbols = tuple(symbols) + symbol_set = frozenset(symbols) + if (len(symbol_set) != len(symbols) + or not all(map(lambda symbol: isinstance(symbol, _str), symbols))): + raise AvroException( + 'Invalid symbols for enum schema: %r.' % (symbols,)) + + super(EnumSchema, self).__init__( + data_type=ENUM, + name=name, + namespace=namespace, + names=names, + other_props=other_props, + ) + + self._props['symbols'] = symbols + if doc is not None: + self._props['doc'] = doc + + @property + def symbols(self): + """Returns: the symbols defined in this enum.""" + return self._props['symbols'] + + def to_json(self, names=None): + if names is None: + names = Names() + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + return names.prune_namespace(self.props) + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ +# Complex Types (recursive) + + +class ArraySchema(Schema): + """Schema of an array.""" + + def __init__(self, items, other_props=None): + """Initializes a new array schema object. + + Args: + items: Avro schema of the array items. 
+ other_props: + """ + super(ArraySchema, self).__init__( + data_type=ARRAY, + other_props=other_props, + ) + self._items_schema = items + self._props['items'] = items + + @property + def items(self): + """Returns: the schema of the items in this array.""" + return self._items_schema + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + item_schema = self.items + to_dump['items'] = item_schema.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class MapSchema(Schema): + """Schema of a map.""" + + def __init__(self, values, other_props=None): + """Initializes a new map schema object. + + Args: + values: Avro schema of the map values. + other_props: + """ + super(MapSchema, self).__init__( + data_type=MAP, + other_props=other_props, + ) + self._values_schema = values + self._props['values'] = values + + @property + def values(self): + """Returns: the schema of the values in this map.""" + return self._values_schema + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + to_dump['values'] = self.values.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class UnionSchema(Schema): + """Schema of a union.""" + + def __init__(self, schemas): + """Initializes a new union schema object. + + Args: + schemas: Ordered collection of schema branches in the union. + """ + super(UnionSchema, self).__init__(data_type=UNION) + self._schemas = tuple(schemas) + + # Validate the schema branches: + + # All named schema names are unique: + named_branches = tuple( + filter(lambda schema: schema.type in NAMED_TYPES, self._schemas)) + unique_names = frozenset(map(lambda schema: schema.fullname, named_branches)) + if len(unique_names) != len(named_branches): + raise AvroException( + 'Invalid union branches with duplicate schema name:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + + # Types are unique within unnamed schemas, and union is not allowed: + unnamed_branches = tuple( + filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas)) + unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches)) + if UNION in unique_types: + raise AvroException( + 'Invalid union branches contain other unions:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + if len(unique_types) != len(unnamed_branches): + raise AvroException( + 'Invalid union branches with duplicate type:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + + @property + def schemas(self): + """Returns: the ordered list of schema branches in the union.""" + return self._schemas + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = [] + for schema in self.schemas: + to_dump.append(schema.to_json(names)) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class ErrorUnionSchema(UnionSchema): + """Schema representing the declared errors of a protocol message.""" + + def __init__(self, schemas): + """Initializes an error-union 
schema.
+
+        Args:
+            schemas: Collection of error schemas.
+        """
+        # Prepend "string" to handle system errors
+        schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas)
+        super(ErrorUnionSchema, self).__init__(schemas=schemas)
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        to_dump = []
+        for schema in self.schemas:
+            # Don't print the system error schema
+            if schema.type == STRING:
+                continue
+            to_dump.append(schema.to_json(names))
+        return to_dump
+
+
+# ------------------------------------------------------------------------------
+
+
+class RecordSchema(NamedSchema):
+    """Schema of a record."""
+
+    @staticmethod
+    def _make_field(index, field_desc, names):
+        """Builds a field schema from its JSON descriptor.
+
+        Args:
+            index: 0-based index of the field in the record.
+            field_desc: JSON descriptor of the record field.
+            names: Avro name tracker.
+        Returns:
+            The field schema.
+        """
+        field_schema = schema_from_json_data(
+            json_data=field_desc['type'],
+            names=names,
+        )
+        other_props = (
+            dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))
+        return Field(
+            data_type=field_schema,
+            name=field_desc['name'],
+            index=index,
+            has_default=('default' in field_desc),
+            default=field_desc.get('default', _NO_DEFAULT),
+            order=field_desc.get('order', None),
+            doc=field_desc.get('doc', None),
+            other_props=other_props,
+        )
+
+    @staticmethod
+    def make_field_list(field_desc_list, names):
+        """Builds field schemas from a list of field JSON descriptors.
+
+        Args:
+            field_desc_list: Collection of field JSON descriptors.
+            names: Avro name tracker.
+        Yields:
+            Field schemas.
+        """
+        for index, field_desc in enumerate(field_desc_list):
+            yield RecordSchema._make_field(index, field_desc, names)
+
+    @staticmethod
+    def _make_field_map(fields):
+        """Builds the field map.
+
+        Guarantees field name uniqueness.
+
+        Args:
+            fields: Iterable of field schemas.
+        Returns:
+            A map of field schemas, indexed by name.
+        """
+        field_map = {}
+        for field in fields:
+            if field.name in field_map:
+                raise SchemaParseException(
+                    'Duplicate record field name %r.' % field.name)
+            field_map[field.name] = field
+        return field_map
+
+    def __init__(
+            self,
+            name,
+            namespace,
+            fields=None,
+            make_fields=None,
+            names=None,
+            record_type=RECORD,
+            doc=None,
+            other_props=None
+    ):
+        """Initializes a new record schema object.
+
+        Args:
+            name: Name of the record (absolute or relative).
+            namespace: Optional namespace the record belongs to, if name is relative.
+            fields: Collection of fields to add to this record.
+                Exactly one of fields or make_fields must be specified.
+            make_fields: Function creating the fields that belong to the record.
+                The function signature is: make_fields(names) -> ordered field list.
+                Exactly one of fields or make_fields must be specified.
+            names: Tracker to resolve and register Avro names.
+            record_type: Type of the record: one of RECORD, ERROR or REQUEST.
+                Protocol requests are not named.
+            doc: Optional documentation string for the record.
+            other_props: Optional map of additional properties of the schema.
+        """
+        if record_type == REQUEST:
+            # Protocol requests are not named:
+            super(RecordSchema, self).__init__(
+                data_type=REQUEST,
+                other_props=other_props,
+            )
+        elif record_type in [RECORD, ERROR]:
+            # Register this record name in the tracker:
+            super(RecordSchema, self).__init__(
+                data_type=record_type,
+                name=name,
+                namespace=namespace,
+                names=names,
+                other_props=other_props,
+            )
+        else:
+            raise SchemaParseException(
+                'Invalid record type: %r.'
% record_type) + + if record_type in [RECORD, ERROR]: + avro_name = names.get_name(name=name, namespace=namespace) + nested_names = names.new_with_default_namespace(namespace=avro_name.namespace) + elif record_type == REQUEST: + # Protocol request has no name: no need to change default namespace: + nested_names = names + + if fields is None: + fields = make_fields(names=nested_names) + else: + assert make_fields is None + self._fields = tuple(fields) + + self._field_map = RecordSchema._make_field_map(self._fields) + + self._props['fields'] = fields + if doc is not None: + self._props['doc'] = doc + + @property + def fields(self): + """Returns: the field schemas, as an ordered tuple.""" + return self._fields + + @property + def field_map(self): + """Returns: a read-only map of the field schemas index by field names.""" + return self._field_map + + def to_json(self, names=None): + if names is None: + names = Names() + # Request records don't have names + if self.type == REQUEST: + return [f.to_json(names) for f in self.fields] + + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + + to_dump = names.prune_namespace(self.props.copy()) + to_dump['fields'] = [f.to_json(names) for f in self.fields] + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ +# Module functions + + +def filter_keys_out(items, keys): + """Filters a collection of (key, value) items. + + Exclude any item whose key belongs to keys. + + Args: + items: Dictionary of items to filter the keys out of. + keys: Keys to filter out. + Yields: + Filtered items. + """ + for key, value in items.items(): + if key in keys: + continue + yield key, value + + +# ------------------------------------------------------------------------------ + + +def _schema_from_json_string(json_string, names): + if json_string in PRIMITIVE_TYPES: + return PrimitiveSchema(data_type=json_string) + + # Look for a known named schema: + schema = names.get_schema(name=json_string) + if schema is None: + raise SchemaParseException( + 'Unknown named schema %r, known names: %r.' 
+ % (json_string, sorted(names.names))) + return schema + + +def _schema_from_json_array(json_array, names): + def MakeSchema(desc): + return schema_from_json_data(json_data=desc, names=names) + + return UnionSchema(map(MakeSchema, json_array)) + + +def _schema_from_json_object(json_object, names): + data_type = json_object.get('type') + if data_type is None: + raise SchemaParseException( + 'Avro schema JSON descriptor has no "type" property: %r' % json_object) + + other_props = dict( + filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS)) + + if data_type in PRIMITIVE_TYPES: + # FIXME should not ignore other properties + result = PrimitiveSchema(data_type, other_props=other_props) + + elif data_type in NAMED_TYPES: + name = json_object.get('name') + namespace = json_object.get('namespace', names.default_namespace) + if data_type == FIXED: + size = json_object.get('size') + result = FixedSchema(name, namespace, size, names, other_props) + elif data_type == ENUM: + symbols = json_object.get('symbols') + doc = json_object.get('doc') + result = EnumSchema(name, namespace, symbols, names, doc, other_props) + + elif data_type in [RECORD, ERROR]: + field_desc_list = json_object.get('fields', ()) + + def MakeFields(names): + return tuple(RecordSchema.make_field_list(field_desc_list, names)) + + result = RecordSchema( + name=name, + namespace=namespace, + make_fields=MakeFields, + names=names, + record_type=data_type, + doc=json_object.get('doc'), + other_props=other_props, + ) + else: + raise Exception('Internal error: unknown type %r.' % data_type) + + elif data_type in VALID_TYPES: + # Unnamed, non-primitive Avro type: + + if data_type == ARRAY: + items_desc = json_object.get('items') + if items_desc is None: + raise SchemaParseException( + 'Invalid array schema descriptor with no "items" : %r.' + % json_object) + result = ArraySchema( + items=schema_from_json_data(items_desc, names), + other_props=other_props, + ) + + elif data_type == MAP: + values_desc = json_object.get('values') + if values_desc is None: + raise SchemaParseException( + 'Invalid map schema descriptor with no "values" : %r.' + % json_object) + result = MapSchema( + values=schema_from_json_data(values_desc, names=names), + other_props=other_props, + ) + + elif data_type == ERROR_UNION: + error_desc_list = json_object.get('declared_errors') + assert error_desc_list is not None + error_schemas = map( + lambda desc: schema_from_json_data(desc, names=names), + error_desc_list) + result = ErrorUnionSchema(schemas=error_schemas) + + else: + raise Exception('Internal error: unknown type %r.' % data_type) + else: + raise SchemaParseException( + 'Invalid JSON descriptor for an Avro schema: %r' % json_object) + return result + + +# Parsers for the JSON data types: +_JSONDataParserTypeMap = { + _str: _schema_from_json_string, + list: _schema_from_json_array, + dict: _schema_from_json_object, +} + + +def schema_from_json_data(json_data, names=None): + """Builds an Avro Schema from its JSON descriptor. + + Args: + json_data: JSON data representing the descriptor of the Avro schema. + names: Optional tracker for Avro named schemas. + Returns: + The Avro schema parsed from the JSON descriptor. + Raises: + SchemaParseException: if the descriptor is invalid. + """ + if names is None: + names = Names() + + # Select the appropriate parser based on the JSON data type: + parser = _JSONDataParserTypeMap.get(type(json_data)) + if parser is None: + raise SchemaParseException( + 'Invalid JSON descriptor for an Avro schema: %r.' 
% json_data) + return parser(json_data, names=names) + + +# ------------------------------------------------------------------------------ + + +def parse(json_string): + """Constructs a Schema from its JSON descriptor in text form. + + Args: + json_string: String representation of the JSON descriptor of the schema. + Returns: + The parsed schema. + Raises: + SchemaParseException: on JSON parsing error, + or if the JSON descriptor is invalid. + """ + try: + json_data = json.loads(json_string) + except Exception as exn: + raise SchemaParseException( + 'Error parsing schema from JSON: %r. ' + 'Error message: %r.' + % (json_string, exn)) + + # Initialize the names object + names = Names() + + # construct the Avro Schema object + return schema_from_json_data(json_data, names) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client.py new file mode 100644 index 0000000..a2efa21 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client.py @@ -0,0 +1,460 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import logging +import uuid +from typing import ( # pylint: disable=unused-import + Optional, + Any, + Tuple, +) + +try: + from urllib.parse import parse_qs, quote +except ImportError: + from urlparse import parse_qs # type: ignore + from urllib2 import quote # type: ignore + +import six + +from azure.core.configuration import Configuration +from azure.core.credentials import AzureSasCredential +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import RequestsTransport, HttpTransport +from azure.core.pipeline.policies import ( + RedirectPolicy, + ContentDecodePolicy, + BearerTokenCredentialPolicy, + ProxyPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + UserAgentPolicy, + AzureSasCredentialPolicy +) + +from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .models import LocationMode +from .authentication import SharedKeyCredentialPolicy +from .shared_access_signature import QueryStringConstants +from .request_handlers import serialize_batch_body, _get_batch_request_delimiter +from .policies import ( + StorageHeadersPolicy, + StorageContentValidation, + StorageRequestHook, + StorageResponseHook, + StorageLoggingPolicy, + StorageHosts, + QueueMessagePolicy, + ExponentialRetry, +) +from .._version import VERSION +from .response_handlers import process_storage_error, PartialBatchErrorException + + +_LOGGER = logging.getLogger(__name__) +_SERVICE_PARAMS = { + "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, + "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, + "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, + "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, +} + + +class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + parsed_url, # type: Any + service, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None
+        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
+        self._hosts = kwargs.get("_hosts")
+        self.scheme = parsed_url.scheme
+
+        if service not in ["blob", "queue", "file-share", "dfs"]:
+            raise ValueError("Invalid service: {}".format(service))
+        service_name = service.split('-')[0]
+        account = parsed_url.netloc.split(".{}.core.".format(service_name))
+
+        self.account_name = account[0] if len(account) > 1 else None
+        if not self.account_name and (parsed_url.netloc.startswith("localhost")
+                                      or parsed_url.netloc.startswith("127.0.0.1")):
+            self.account_name = parsed_url.path.strip("/")
+
+        self.credential = _format_shared_key_credential(self.account_name, credential)
+        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
+            raise ValueError("Token credential is only supported with HTTPS.")
+
+        secondary_hostname = None
+        if hasattr(self.credential, "account_name"):
+            self.account_name = self.credential.account_name
+            secondary_hostname = "{}-secondary.{}.{}".format(
+                self.credential.account_name, service_name, SERVICE_HOST_BASE)
+
+        if not self._hosts:
+            if len(account) > 1:
+                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
+            if kwargs.get("secondary_hostname"):
+                secondary_hostname = kwargs["secondary_hostname"]
+            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
+            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
+
+        self.require_encryption = kwargs.get("require_encryption", False)
+        self.key_encryption_key = kwargs.get("key_encryption_key")
+        self.key_resolver_function = kwargs.get("key_resolver_function")
+        self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
+
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *args):
+        self._client.__exit__(*args)
+
+    def close(self):
+        """This method closes the sockets opened by the client.
+        It is not needed when the client is used as a context manager.
+        """
+        self._client.close()
+
+    @property
+    def url(self):
+        """The full endpoint URL to this entity, including SAS token if used.
+
+        This could be either the primary endpoint,
+        or the secondary endpoint depending on the current :func:`location_mode`.
+        """
+        return self._format_url(self._hosts[self._location_mode])
+
+    @property
+    def primary_endpoint(self):
+        """The full primary endpoint URL.
+
+        :type: str
+        """
+        return self._format_url(self._hosts[LocationMode.PRIMARY])
+
+    @property
+    def primary_hostname(self):
+        """The hostname of the primary endpoint.
+
+        :type: str
+        """
+        return self._hosts[LocationMode.PRIMARY]
+
+    @property
+    def secondary_endpoint(self):
+        """The full secondary endpoint URL if configured.
+
+        If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :type: str
+        :raise ValueError:
+        """
+        if not self._hosts[LocationMode.SECONDARY]:
+            raise ValueError("No secondary host configured.")
+        return self._format_url(self._hosts[LocationMode.SECONDARY])
+
+    @property
+    def secondary_hostname(self):
+        """The hostname of the secondary endpoint.
+
+        If not available this will be None. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :type: str or None
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :type: str
+        """
+
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError("No host URL for location mode: {}".format(value))
+
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :type: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
+        query_str = "?"
+        if snapshot:
+            query_str += "snapshot={}&".format(self.snapshot)
+        if share_snapshot:
+            query_str += "sharesnapshot={}&".format(self.snapshot)
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if sas_token and not credential:
+            query_str += sas_token
+        elif is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")
+            credential = None
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, "get_token"):
+            self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError("Unsupported credential: {}".format(credential))
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        config.transport = kwargs.get("transport")  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            config.transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, Pipeline(config.transport, policies=policies)
+
+    def _batch_send(
+        self,
+        *reqs,  # type: HttpRequest
+        **kwargs
+    ):
+        """Given a series of requests, do a Storage batch call.
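+
+        A sketch of the outgoing request this builds (assumed shape, for
+        illustration only)::
+
+            POST https://<account>.blob.core.windows.net/?comp=batch
+            Content-Type: multipart/mixed; boundary=batch_<uuid>
+
+        Each sub-request in `reqs` is serialized into the multipart body.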
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + batch_id = str(uuid.uuid1()) + + request = self._client._client.post( # pylint: disable=protected-access + url='{}://{}/{}?{}comp=batch{}{}'.format( + self.scheme, + self.primary_hostname, + kwargs.pop('path', ""), + kwargs.pop('restype', ""), + kwargs.pop('sas', ""), + kwargs.pop('timeout', "") + ), + headers={ + 'x-ms-version': self.api_version, + "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) + } + ) + + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + + request.set_multipart_mixed( + *reqs, + policies=policies, + enforce_https=False + ) + + Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access + body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) + request.set_bytes_body(body) + + temp = request.multipart_mixed_info + request.multipart_mixed_info = None + pipeline_response = self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + request.multipart_mixed_info = temp + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() + if raise_on_any_failure: + parts = list(response.parts()) + if any(p for p in parts if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts + ) + raise error + return iter(parts) + return parts + except HttpResponseError as error: + process_storage_error(error) + +class TransportWrapper(HttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. 
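+
+    A sketch of the scenario this guards against (assumed usage)::
+
+        with BlobServiceClient(account_url, credential=credential) as service:
+            container = service.get_container_client("mycontainer")
+            # 'container' wraps the parent transport; closing it must not
+            # close the transport still owned by 'service'.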
+    """
+    def __init__(self, transport):
+        self._transport = transport
+
+    def send(self, request, **kwargs):
+        return self._transport.send(request, **kwargs)
+
+    def open(self):
+        pass
+
+    def close(self):
+        pass
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, *args):  # pylint: disable=arguments-differ
+        pass
+
+
+def _format_shared_key_credential(account_name, credential):
+    if isinstance(credential, six.string_types):
+        if not account_name:
+            raise ValueError("Unable to determine account name for shared key credential.")
+        credential = {"account_name": account_name, "account_key": credential}
+    if isinstance(credential, dict):
+        if "account_name" not in credential:
+            raise ValueError("Shared key credential missing 'account_name'.")
+        if "account_key" not in credential:
+            raise ValueError("Shared key credential missing 'account_key'.")
+        return SharedKeyCredentialPolicy(**credential)
+    return credential
+
+
+def parse_connection_str(conn_str, credential, service):
+    conn_str = conn_str.rstrip(";")
+    conn_settings = [s.split("=", 1) for s in conn_str.split(";")]
+    if any(len(tup) != 2 for tup in conn_settings):
+        raise ValueError("Connection string is either blank or malformed.")
+    conn_settings = dict((key.upper(), val) for key, val in conn_settings)
+    endpoints = _SERVICE_PARAMS[service]
+    primary = None
+    secondary = None
+    if not credential:
+        try:
+            credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
+        except KeyError:
+            credential = conn_settings.get("SHAREDACCESSSIGNATURE")
+    if endpoints["primary"] in conn_settings:
+        primary = conn_settings[endpoints["primary"]]
+        if endpoints["secondary"] in conn_settings:
+            secondary = conn_settings[endpoints["secondary"]]
+    else:
+        if endpoints["secondary"] in conn_settings:
+            raise ValueError("Connection string specifies only secondary endpoint.")
+        try:
+            primary = "{}://{}.{}.{}".format(
+                conn_settings["DEFAULTENDPOINTSPROTOCOL"],
+                conn_settings["ACCOUNTNAME"],
+                service,
+                conn_settings["ENDPOINTSUFFIX"],
+            )
+            secondary = "{}-secondary.{}.{}".format(
+                conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"]
+            )
+        except KeyError:
+            pass
+
+    if not primary:
+        try:
+            primary = "https://{}.{}.{}".format(
+                conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE)
+            )
+        except KeyError:
+            raise ValueError("Connection string missing required connection details.")
+    return primary, secondary, credential
+
+
+def create_configuration(**kwargs):
+    # type: (**Any) -> Configuration
+    config = Configuration(**kwargs)
+    config.headers_policy = StorageHeadersPolicy(**kwargs)
+    config.user_agent_policy = UserAgentPolicy(
+        sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs)
+    config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+    config.logging_policy = StorageLoggingPolicy(**kwargs)
+    config.proxy_policy = ProxyPolicy(**kwargs)
+
+    # Storage settings
+    config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024)
+    config.copy_polling_interval = 15
+
+    # Block blob uploads
+    config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024)
+    config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1)
+    config.use_byte_buffer = kwargs.get("use_byte_buffer", False)
+
+    # Page blob uploads
+    config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024)
+
+    # Datalake file uploads
config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) + + # Blob downloads + config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) + config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) + + # File uploads + config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) + return config + + +def parse_query(query_str): + sas_values = QueryStringConstants.to_list() + parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} + sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] + sas_token = None + if sas_params: + sas_token = "&".join(sas_params) + + snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") + return snapshot, sas_token + + +def is_credential_sastoken(credential): + if not credential or not isinstance(credential, six.string_types): + return False + + sas_values = QueryStringConstants.to_list() + parsed_query = parse_qs(credential.lstrip("?")) + if parsed_query and all([k in sas_values for k in parsed_query.keys()]): + return True + return False diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client_async.py new file mode 100644 index 0000000..3e619c9 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/base_client_async.py @@ -0,0 +1,192 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging + +from azure.core.credentials import AzureSasCredential +from azure.core.pipeline import AsyncPipeline +from azure.core.async_paging import AsyncList +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline.policies import ( + ContentDecodePolicy, + AsyncBearerTokenCredentialPolicy, + AsyncRedirectPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + AzureSasCredentialPolicy, +) +from azure.core.pipeline.transport import AsyncHttpTransport + +from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .authentication import SharedKeyCredentialPolicy +from .base_client import create_configuration +from .policies import ( + StorageContentValidation, + StorageRequestHook, + StorageHosts, + StorageHeadersPolicy, + QueueMessagePolicy +) +from .policies_async import AsyncStorageResponseHook + +from .response_handlers import process_storage_error, PartialBatchErrorException + +if TYPE_CHECKING: + from azure.core.pipeline import Pipeline + from azure.core.pipeline.transport import HttpRequest + from azure.core.configuration import Configuration +_LOGGER = logging.getLogger(__name__) + + +class AsyncStorageAccountHostsMixin(object): + + def __enter__(self): + raise TypeError("Async client only supports 'async with'.") + + def __exit__(self, *args): + pass + + async def __aenter__(self): + await self._client.__aenter__() + return self + + async def __aexit__(self, *args): + await self._client.__aexit__(*args) + + async def close(self): + """ This method is to close the sockets opened by the client. 
+        It is not needed when the client is used as a context manager.
+        """
+        await self._client.close()
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, 'get_token'):
+            self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError("Unsupported credential: {}".format(credential))
+        config = kwargs.get('_configuration') or create_configuration(**kwargs)
+        if kwargs.get('_pipeline'):
+            return config, kwargs['_pipeline']
+        config.transport = kwargs.get('transport')  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            try:
+                from azure.core.pipeline.transport import AioHttpTransport
+            except ImportError:
+                raise ImportError("Unable to create async transport. Please check aiohttp is installed.")
+            config.transport = AioHttpTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.headers_policy,
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            ContentDecodePolicy(response_encoding="utf-8"),
+            AsyncRedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),  # type: ignore
+            config.retry_policy,
+            config.logging_policy,
+            AsyncStorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs),
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, AsyncPipeline(config.transport, policies=policies)
+
+    async def _batch_send(
+        self,
+        *reqs,  # type: HttpRequest
+        **kwargs
+    ):
+        """Given a series of requests, do a Storage batch call.
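+
+        A sketch of consuming the result (assumed usage, for illustration)::
+
+            parts = await client._batch_send(*sub_requests)
+            async for part in parts:
+                print(part.status_code)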
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + request = self._client._client.post( # pylint: disable=protected-access + url='{}://{}/{}?{}comp=batch{}{}'.format( + self.scheme, + self.primary_hostname, + kwargs.pop('path', ""), + kwargs.pop('restype', ""), + kwargs.pop('sas', ""), + kwargs.pop('timeout', "") + ), + headers={ + 'x-ms-version': self.api_version + } + ) + + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + + request.set_multipart_mixed( + *reqs, + policies=policies, + enforce_https=False + ) + + pipeline_response = await self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() # Return an AsyncIterator + if raise_on_any_failure: + parts_list = [] + async for part in parts: + parts_list.append(part) + if any(p for p in parts_list if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts_list + ) + raise error + return AsyncList(parts_list) + return parts + except HttpResponseError as error: + process_storage_error(error) + + +class AsyncTransportWrapper(AsyncHttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. + """ + def __init__(self, async_transport): + self._transport = async_transport + + async def send(self, request, **kwargs): + return await self._transport.send(request, **kwargs) + + async def open(self): + pass + + async def close(self): + pass + + async def __aenter__(self): + pass + + async def __aexit__(self, *args): # pylint: disable=arguments-differ + pass diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/constants.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/constants.py new file mode 100644 index 0000000..bdee829 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/constants.py @@ -0,0 +1,27 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys +from .._generated import AzureBlobStorage + + +X_MS_VERSION = AzureBlobStorage(url="get_api_version")._config.version # pylint: disable=protected-access + +# Socket timeout in seconds +CONNECTION_TIMEOUT = 20 +READ_TIMEOUT = 20 + +# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) +# The socket timeout is now the maximum total duration to send all data. 
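+# As a worked example: 4000 MB = 4,000,000 KB, and 4,000,000 KB / 50 KB/s
+# = 80,000 seconds, which is the READ_TIMEOUT applied below.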
+if sys.version_info >= (3, 5): + # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds + # the 80000 seconds was calculated with: + # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) + READ_TIMEOUT = 80000 + +STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" + +SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/encryption.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/encryption.py new file mode 100644 index 0000000..62607cc --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/encryption.py @@ -0,0 +1,542 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os +from os import urandom +from json import ( + dumps, + loads, +) +from collections import OrderedDict + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import CBC +from cryptography.hazmat.primitives.padding import PKCS7 + +from azure.core.exceptions import HttpResponseError + +from .._version import VERSION +from . import encode_base64, decode_base64_to_bytes + + +_ENCRYPTION_PROTOCOL_V1 = '1.0' +_ERROR_OBJECT_INVALID = \ + '{0} does not define a complete interface. Value of {1} is either missing or invalid.' + + +def _validate_not_none(param_name, param): + if param is None: + raise ValueError('{0} should not be None.'.format(param_name)) + + +def _validate_key_encryption_key_wrap(kek): + # Note that None is not callable and so will fail the second clause of each check. + if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) + if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) + + +class _EncryptionAlgorithm(object): + ''' + Specifies which client encryption algorithm is used. + ''' + AES_CBC_256 = 'AES_CBC_256' + + +class _WrappedContentKey: + ''' + Represents the envelope key details stored on the service. + ''' + + def __init__(self, algorithm, encrypted_key, key_id): + ''' + :param str algorithm: + The algorithm used for wrapping. + :param bytes encrypted_key: + The encrypted content-encryption-key. + :param str key_id: + The key-encryption-key identifier string. + ''' + + _validate_not_none('algorithm', algorithm) + _validate_not_none('encrypted_key', encrypted_key) + _validate_not_none('key_id', key_id) + + self.algorithm = algorithm + self.encrypted_key = encrypted_key + self.key_id = key_id + + +class _EncryptionAgent: + ''' + Represents the encryption agent stored on the service. + It consists of the encryption protocol version and encryption algorithm used. + ''' + + def __init__(self, encryption_algorithm, protocol): + ''' + :param _EncryptionAlgorithm encryption_algorithm: + The algorithm used for encrypting the message contents. 
+ :param str protocol: + The protocol version used for encryption. + ''' + + _validate_not_none('encryption_algorithm', encryption_algorithm) + _validate_not_none('protocol', protocol) + + self.encryption_algorithm = str(encryption_algorithm) + self.protocol = protocol + + +class _EncryptionData: + ''' + Represents the encryption data that is stored on the service. + ''' + + def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, + key_wrapping_metadata): + ''' + :param bytes content_encryption_IV: + The content encryption initialization vector. + :param _EncryptionAgent encryption_agent: + The encryption agent. + :param _WrappedContentKey wrapped_content_key: + An object that stores the wrapping algorithm, the key identifier, + and the encrypted key bytes. + :param dict key_wrapping_metadata: + A dict containing metadata related to the key wrapping. + ''' + + _validate_not_none('content_encryption_IV', content_encryption_IV) + _validate_not_none('encryption_agent', encryption_agent) + _validate_not_none('wrapped_content_key', wrapped_content_key) + + self.content_encryption_IV = content_encryption_IV + self.encryption_agent = encryption_agent + self.wrapped_content_key = wrapped_content_key + self.key_wrapping_metadata = key_wrapping_metadata + + +def _generate_encryption_data_dict(kek, cek, iv): + ''' + Generates and returns the encryption metadata as a dict. + + :param object kek: The key encryption key. See calling functions for more information. + :param bytes cek: The content encryption key. + :param bytes iv: The initialization vector. + :return: A dict containing all the encryption metadata. + :rtype: dict + ''' + # Encrypt the cek. + wrapped_cek = kek.wrap_key(cek) + + # Build the encryption_data dict. + # Use OrderedDict to comply with Java's ordering requirement. + wrapped_content_key = OrderedDict() + wrapped_content_key['KeyId'] = kek.get_kid() + wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) + wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() + + encryption_agent = OrderedDict() + encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 + encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 + + encryption_data_dict = OrderedDict() + encryption_data_dict['WrappedContentKey'] = wrapped_content_key + encryption_data_dict['EncryptionAgent'] = encryption_agent + encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) + encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} + + return encryption_data_dict + + +def _dict_to_encryption_data(encryption_data_dict): + ''' + Converts the specified dictionary to an EncryptionData object for + eventual use in decryption. + + :param dict encryption_data_dict: + The dictionary containing the encryption data. + :return: an _EncryptionData object built from the dictionary. 
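+        The expected dictionary shape is the one produced by
+        _generate_encryption_data_dict above (sketch)::
+
+            {'WrappedContentKey': {'KeyId': ..., 'EncryptedKey': ..., 'Algorithm': ...},
+             'EncryptionAgent': {'Protocol': '1.0', 'EncryptionAlgorithm': 'AES_CBC_256'},
+             'ContentEncryptionIV': '<base64 IV>',
+             'KeyWrappingMetadata': {'EncryptionLibrary': 'Python <version>'}}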
+ :rtype: _EncryptionData + ''' + try: + if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: + raise ValueError("Unsupported encryption version.") + except KeyError: + raise ValueError("Unsupported encryption version.") + wrapped_content_key = encryption_data_dict['WrappedContentKey'] + wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], + decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), + wrapped_content_key['KeyId']) + + encryption_agent = encryption_data_dict['EncryptionAgent'] + encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], + encryption_agent['Protocol']) + + if 'KeyWrappingMetadata' in encryption_data_dict: + key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] + else: + key_wrapping_metadata = None + + encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), + encryption_agent, + wrapped_content_key, + key_wrapping_metadata) + + return encryption_data + + +def _generate_AES_CBC_cipher(cek, iv): + ''' + Generates and returns an encryption cipher for AES CBC using the given cek and iv. + + :param bytes[] cek: The content encryption key for the cipher. + :param bytes[] iv: The initialization vector for the cipher. + :return: A cipher for encrypting in AES256 CBC. + :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher + ''' + + backend = default_backend() + algorithm = AES(cek) + mode = CBC(iv) + return Cipher(algorithm, mode, backend) + + +def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): + ''' + Extracts and returns the content_encryption_key stored in the encryption_data object + and performs necessary validation on all parameters. + :param _EncryptionData encryption_data: + The encryption metadata of the retrieved value. + :param obj key_encryption_key: + The key_encryption_key used to unwrap the cek. Please refer to high-level service object + instance variables for more details. + :param func key_resolver: + A function used that, given a key_id, will return a key_encryption_key. Please refer + to high-level service object instance variables for more details. + :return: the content_encryption_key stored in the encryption_data object. + :rtype: bytes[] + ''' + + _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) + _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) + + if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: + raise ValueError('Encryption version is not supported.') + + content_encryption_key = None + + # If the resolver exists, give priority to the key it finds. + if key_resolver is not None: + key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) + + _validate_not_none('key_encryption_key', key_encryption_key) + if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) + if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): + raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') + # Will throw an exception if the specified algorithm is not supported. 
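+    # For example, a kek whose get_key_wrap_algorithm() returned 'A256KW'
+    # (hypothetical value) would be handed that same string back here.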
+    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+                                                           encryption_data.wrapped_content_key.algorithm)
+    _validate_not_none('content_encryption_key', content_encryption_key)
+
+    return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+    '''
+    Decrypts the given ciphertext using AES256 in CBC mode with 128-bit padding.
+    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+    Returns the original plaintext.
+
+    :param str message:
+        The ciphertext to be decrypted.
+    :param _EncryptionData encryption_data:
+        The metadata associated with this ciphertext.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted plaintext.
+    :rtype: str
+    '''
+    _validate_not_none('message', message)
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+    # decrypt data
+    decrypted_data = message
+    decryptor = cipher.decryptor()
+    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
+
+    # unpad data
+    unpadder = PKCS7(128).unpadder()
+    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+    return decrypted_data
+
+
+def encrypt_blob(blob, key_encryption_key):
+    '''
+    Encrypts the given blob using AES256 in CBC mode with 128-bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encryption metadata. This method should
+    only be used when a blob is small enough for single-shot upload. Encrypting larger blobs
+    is done as a part of the upload_data_chunks method.
+
+    :param bytes blob:
+        The blob to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+    :rtype: (str, bytes)
+    '''
+
+    _validate_not_none('blob', blob)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = urandom(32)
+    initialization_vector = urandom(16)
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(blob) + padder.finalize()
+
+    # Encrypt the data.
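+    # (CBC operates on whole 16-byte blocks, which the PKCS7 padding above
+    # guarantees, so finalize() cannot fail on a short last block.)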
+ encryptor = cipher.encryptor()
+ encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+ encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+ initialization_vector)
+ encryption_data['EncryptionMode'] = 'FullBlob'
+
+ return dumps(encryption_data), encrypted_data
+
+
+def generate_blob_encryption_data(key_encryption_key):
+ '''
+ Generates the encryption_metadata for the blob.
+
+ :param bytes key_encryption_key:
+ The key-encryption-key used to wrap the cek associated with this blob.
+ :return: A tuple containing the cek and iv for this blob as well as the
+ serialized encryption metadata for the blob.
+ :rtype: (bytes, bytes, str)
+ '''
+ encryption_data = None
+ content_encryption_key = None
+ initialization_vector = None
+ if key_encryption_key:
+ _validate_key_encryption_key_wrap(key_encryption_key)
+ content_encryption_key = urandom(32)
+ initialization_vector = urandom(16)
+ encryption_data = _generate_encryption_data_dict(key_encryption_key,
+ content_encryption_key,
+ initialization_vector)
+ encryption_data['EncryptionMode'] = 'FullBlob'
+ encryption_data = dumps(encryption_data)
+
+ return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
+ content, start_offset, end_offset, response_headers):
+ '''
+ Decrypts the given blob contents and returns only the requested range.
+
+ :param bool require_encryption:
+ Whether or not the calling blob service requires objects to be decrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :param key_resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The decrypted blob content.
+ :rtype: bytes
+ '''
+ try:
+ encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+ except: # pylint: disable=bare-except
+ if require_encryption:
+ raise ValueError(
+ 'Encryption required, but received data does not contain appropriate metadata.'
+ \
+ 'Data was either not encrypted or metadata has been lost.')
+
+ return content
+
+ if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
+ raise ValueError('Specified encryption algorithm is not supported.')
+
+ blob_type = response_headers['x-ms-blob-type']
+
+ iv = None
+ unpad = False
+ if 'content-range' in response_headers:
+ content_range = response_headers['content-range']
+ # Format: 'bytes x-y/size'
+
+ # Ignore the word 'bytes'
+ content_range = content_range.split(' ')
+
+ content_range = content_range[1].split('-')
+ content_range = content_range[1].split('/')
+ end_range = int(content_range[0])
+ blob_size = int(content_range[1])
+
+ if start_offset >= 16:
+ iv = content[:16]
+ content = content[16:]
+ start_offset -= 16
+ else:
+ iv = encryption_data.content_encryption_IV
+
+ if end_range == blob_size - 1:
+ unpad = True
+ else:
+ unpad = True
+ iv = encryption_data.content_encryption_IV
+
+ if blob_type == 'PageBlob':
+ unpad = False
+
+ content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+ decryptor = cipher.decryptor()
+
+ content = decryptor.update(content) + decryptor.finalize()
+ if unpad:
+ unpadder = PKCS7(128).unpadder()
+ content = unpadder.update(content) + unpadder.finalize()
+
+ return content[start_offset: len(content) - end_offset]
+
+
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+ encryptor = None
+ padder = None
+
+ if cek is not None and iv is not None:
+ cipher = _generate_AES_CBC_cipher(cek, iv)
+ encryptor = cipher.encryptor()
+ padder = PKCS7(128).padder() if should_pad else None
+
+ return encryptor, padder
+
+
+def encrypt_queue_message(message, key_encryption_key):
+ '''
+ Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
+ Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+ Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+ :param object message:
+ The plain text message to be encrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :return: A json-formatted string containing the encrypted message and the encryption metadata.
+ :rtype: str
+ '''
+
+ _validate_not_none('message', message)
+ _validate_not_none('key_encryption_key', key_encryption_key)
+ _validate_key_encryption_key_wrap(key_encryption_key)
+
+ # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+ content_encryption_key = os.urandom(32)
+ initialization_vector = os.urandom(16)
+
+ # Queue encoding functions all return unicode strings, and encryption should
+ # operate on binary strings.
+ message = message.encode('utf-8')
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+ # PKCS7 with 16 byte blocks ensures compatibility with AES.
+ padder = PKCS7(128).padder()
+ padded_data = padder.update(message) + padder.finalize()
+
+ # Encrypt the data.
+ encryptor = cipher.encryptor()
+ encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+ # Build the dictionary structure.
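+ # Illustrative shape of the serialized envelope (key names as consumed by
+ # _dict_to_encryption_data above; binary values are base64-encoded):
+ # {"EncryptedMessageContents": "<ciphertext>",
+ # "EncryptionData": {"WrappedContentKey": {"KeyId": ..., "EncryptedKey": ...,
+ # "Algorithm": ...}, "EncryptionAgent": {...}, "ContentEncryptionIV": ...}}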
+ queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+ 'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+ content_encryption_key,
+ initialization_vector)}
+
+ return dumps(queue_message)
+
+
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+ '''
+ Returns the decrypted message contents from an EncryptedQueueMessage.
+ If no encryption metadata is present, will return the unaltered message.
+ :param str message:
+ The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+ :param bool require_encryption:
+ If set, will enforce that the retrieved messages are encrypted and decrypt them.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ unwrap_key(key, algorithm)
+ - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+ get_kid()
+ - returns a string key id for this key-encryption-key.
+ :param function resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The plain text message from the queue message.
+ :rtype: str
+ '''
+
+ try:
+ message = loads(message)
+
+ encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+ decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+ except (KeyError, ValueError):
+ # Message was not json formatted and so was not encrypted
+ # or the user provided a json formatted message.
+ if require_encryption:
+ raise ValueError('Message was not encrypted.')
+
+ return message
+ try:
+ return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+ except Exception as error:
+ raise HttpResponseError(
+ message="Decryption failed.",
+ response=response,
+ error=error)
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/models.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/models.py
new file mode 100644
index 0000000..c51356b
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/models.py
@@ -0,0 +1,466 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-instance-attributes +from enum import Enum + + +def get_enum_value(value): + if value is None or value in ["None", ""]: + return None + try: + return value.value + except AttributeError: + return value + + +class StorageErrorCode(str, Enum): + + # Generic storage values + account_already_exists = "AccountAlreadyExists" + account_being_created = "AccountBeingCreated" + account_is_disabled = "AccountIsDisabled" + authentication_failed = "AuthenticationFailed" + authorization_failure = "AuthorizationFailure" + no_authentication_information = "NoAuthenticationInformation" + condition_headers_not_supported = "ConditionHeadersNotSupported" + condition_not_met = "ConditionNotMet" + empty_metadata_key = "EmptyMetadataKey" + insufficient_account_permissions = "InsufficientAccountPermissions" + internal_error = "InternalError" + invalid_authentication_info = "InvalidAuthenticationInfo" + invalid_header_value = "InvalidHeaderValue" + invalid_http_verb = "InvalidHttpVerb" + invalid_input = "InvalidInput" + invalid_md5 = "InvalidMd5" + invalid_metadata = "InvalidMetadata" + invalid_query_parameter_value = "InvalidQueryParameterValue" + invalid_range = "InvalidRange" + invalid_resource_name = "InvalidResourceName" + invalid_uri = "InvalidUri" + invalid_xml_document = "InvalidXmlDocument" + invalid_xml_node_value = "InvalidXmlNodeValue" + md5_mismatch = "Md5Mismatch" + metadata_too_large = "MetadataTooLarge" + missing_content_length_header = "MissingContentLengthHeader" + missing_required_query_parameter = "MissingRequiredQueryParameter" + missing_required_header = "MissingRequiredHeader" + missing_required_xml_node = "MissingRequiredXmlNode" + multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" + operation_timed_out = "OperationTimedOut" + out_of_range_input = "OutOfRangeInput" + out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" + request_body_too_large = "RequestBodyTooLarge" + resource_type_mismatch = "ResourceTypeMismatch" + request_url_failed_to_parse = "RequestUrlFailedToParse" + resource_already_exists = "ResourceAlreadyExists" + resource_not_found = "ResourceNotFound" + server_busy = "ServerBusy" + unsupported_header = "UnsupportedHeader" + unsupported_xml_node = "UnsupportedXmlNode" + unsupported_query_parameter = "UnsupportedQueryParameter" + unsupported_http_verb = "UnsupportedHttpVerb" + + # Blob values + append_position_condition_not_met = "AppendPositionConditionNotMet" + blob_already_exists = "BlobAlreadyExists" + blob_not_found = "BlobNotFound" + blob_overwritten = "BlobOverwritten" + blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" + block_count_exceeds_limit = "BlockCountExceedsLimit" + block_list_too_long = "BlockListTooLong" + cannot_change_to_lower_tier = "CannotChangeToLowerTier" + cannot_verify_copy_source = "CannotVerifyCopySource" + container_already_exists = "ContainerAlreadyExists" + container_being_deleted = "ContainerBeingDeleted" + container_disabled = "ContainerDisabled" + container_not_found = "ContainerNotFound" + content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" + copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" + copy_id_mismatch = "CopyIdMismatch" + feature_version_mismatch = "FeatureVersionMismatch" + incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" + incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" + incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" + infinite_lease_duration_required = "InfiniteLeaseDurationRequired" + invalid_blob_or_block = "InvalidBlobOrBlock" + invalid_blob_tier = "InvalidBlobTier" + invalid_blob_type = "InvalidBlobType" + invalid_block_id = "InvalidBlockId" + invalid_block_list = "InvalidBlockList" + invalid_operation = "InvalidOperation" + invalid_page_range = "InvalidPageRange" + invalid_source_blob_type = "InvalidSourceBlobType" + invalid_source_blob_url = "InvalidSourceBlobUrl" + invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" + lease_already_present = "LeaseAlreadyPresent" + lease_already_broken = "LeaseAlreadyBroken" + lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" + lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" + lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" + lease_id_missing = "LeaseIdMissing" + lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" + lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" + lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" + lease_lost = "LeaseLost" + lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" + lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" + lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" + max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" + no_pending_copy_operation = "NoPendingCopyOperation" + operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" + pending_copy_operation = "PendingCopyOperation" + previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" + previous_snapshot_not_found = "PreviousSnapshotNotFound" + previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" + sequence_number_condition_not_met = "SequenceNumberConditionNotMet" + sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" + snapshot_count_exceeded = "SnapshotCountExceeded" + snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" + snapshots_present = "SnapshotsPresent" + source_condition_not_met = "SourceConditionNotMet" + system_in_use = "SystemInUse" + target_condition_not_met = "TargetConditionNotMet" + unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" + blob_being_rehydrated = "BlobBeingRehydrated" + blob_archived = "BlobArchived" + blob_not_archived = "BlobNotArchived" + + # Queue values + invalid_marker = "InvalidMarker" + message_not_found = "MessageNotFound" + message_too_large = "MessageTooLarge" + pop_receipt_mismatch = "PopReceiptMismatch" + queue_already_exists = "QueueAlreadyExists" + queue_being_deleted = "QueueBeingDeleted" + queue_disabled = "QueueDisabled" + queue_not_empty = "QueueNotEmpty" + queue_not_found = "QueueNotFound" + + # File values + cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" + client_cache_flush_delay = "ClientCacheFlushDelay" + delete_pending = "DeletePending" + directory_not_empty = "DirectoryNotEmpty" + file_lock_conflict = "FileLockConflict" + invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" + parent_not_found = "ParentNotFound" + read_only_attribute = "ReadOnlyAttribute" + share_already_exists = "ShareAlreadyExists" + share_being_deleted = "ShareBeingDeleted" + 
share_disabled = "ShareDisabled" + share_not_found = "ShareNotFound" + sharing_violation = "SharingViolation" + share_snapshot_in_progress = "ShareSnapshotInProgress" + share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" + share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" + share_has_snapshots = "ShareHasSnapshots" + container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" + + # DataLake values + content_length_must_be_zero = 'ContentLengthMustBeZero' + path_already_exists = 'PathAlreadyExists' + invalid_flush_position = 'InvalidFlushPosition' + invalid_property_name = 'InvalidPropertyName' + invalid_source_uri = 'InvalidSourceUri' + unsupported_rest_version = 'UnsupportedRestVersion' + file_system_not_found = 'FilesystemNotFound' + path_not_found = 'PathNotFound' + rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' + source_path_not_found = 'SourcePathNotFound' + destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' + file_system_already_exists = 'FilesystemAlreadyExists' + file_system_being_deleted = 'FilesystemBeingDeleted' + invalid_destination_path = 'InvalidDestinationPath' + invalid_rename_source_path = 'InvalidRenameSourcePath' + invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' + lease_is_already_broken = 'LeaseIsAlreadyBroken' + lease_name_mismatch = 'LeaseNameMismatch' + path_conflict = 'PathConflict' + source_path_is_being_deleted = 'SourcePathIsBeingDeleted' + + +class DictMixin(object): + + def __setitem__(self, key, item): + self.__dict__[key] = item + + def __getitem__(self, key): + return self.__dict__[key] + + def __repr__(self): + return str(self) + + def __len__(self): + return len(self.keys()) + + def __delitem__(self, key): + self.__dict__[key] = None + + def __eq__(self, other): + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self): + return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) + + def has_key(self, k): + return k in self.__dict__ + + def update(self, *args, **kwargs): + return self.__dict__.update(*args, **kwargs) + + def keys(self): + return [k for k in self.__dict__ if not k.startswith('_')] + + def values(self): + return [v for k, v in self.__dict__.items() if not k.startswith('_')] + + def items(self): + return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] + + def get(self, key, default=None): + if key in self.__dict__: + return self.__dict__[key] + return default + + +class LocationMode(object): + """ + Specifies the location the request should be sent to. This mode only applies + for RA-GRS accounts which allow secondary read access. All other account types + must use PRIMARY. + """ + + PRIMARY = 'primary' #: Requests should be sent to the primary location. + SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. + + +class ResourceTypes(object): + """ + Specifies the resource types that are accessible with the account SAS. 
+
+ :param bool service:
+ Access to service-level APIs (e.g., Get/Set Service Properties,
+ Get Service Stats, List Containers/Queues/Shares)
+ :param bool container:
+ Access to container-level APIs (e.g., Create/Delete Container,
+ Create/Delete Queue, Create/Delete Share,
+ List Blobs/Files and Directories)
+ :param bool object:
+ Access to object-level APIs for blobs, queue messages, and
+ files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+ """
+
+ def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin
+ self.service = service
+ self.container = container
+ self.object = object
+ self._str = (('s' if self.service else '') +
+ ('c' if self.container else '') +
+ ('o' if self.object else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, string):
+ """Create a ResourceTypes from a string.
+
+ To specify service, container, or object you need only to
+ include the first letter of the word in the string. E.g. for service and container,
+ you would provide a string "sc".
+
+ :param str string: Specify service, container, or object in
+ the string with the first letter of the word.
+ :return: A ResourceTypes object
+ :rtype: ~azure.storage.blob.ResourceTypes
+ """
+ res_service = 's' in string
+ res_container = 'c' in string
+ res_object = 'o' in string
+
+ parsed = cls(res_service, res_container, res_object)
+ parsed._str = string # pylint: disable = protected-access
+ return parsed
+
+
+class AccountSasPermissions(object):
+ """
+ :class:`~AccountSasPermissions` class to be used with the generate_account_sas
+ function and for the AccessPolicies used with set_*_acl. There are two types of
+ SAS which may be used to grant resource access. One is to grant access to a
+ specific resource (resource-specific). Another is to grant access to the
+ entire service for a specific account and allow certain operations based on
+ permissions found here.
+
+ :param bool read:
+ Valid for all signed resource types (Service, Container, and Object).
+ Permits read permissions to the specified resource type.
+ :param bool write:
+ Valid for all signed resource types (Service, Container, and Object).
+ Permits write permissions to the specified resource type.
+ :param bool delete:
+ Valid for Container and Object resource types, except for queue messages.
+ :param bool delete_previous_version:
+ Delete the previous blob version for the versioning enabled storage account.
+ :param bool list:
+ Valid for Service and Container resource types only.
+ :param bool add:
+ Valid for the following Object resource types only: queue messages, and append blobs.
+ :param bool create:
+ Valid for the following Object resource types only: blobs and files.
+ Users can create new blobs or files, but may not overwrite existing
+ blobs or files.
+ :param bool update:
+ Valid for the following Object resource types only: queue messages.
+ :param bool process:
+ Valid for the following Object resource type only: queue messages.
+ :keyword bool tag:
+ To enable set or get tags on the blobs in the container.
+ :keyword bool filter_by_tags:
+ To enable get blobs by tags, this should be used together with list permission.
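+
+ For example, ``AccountSasPermissions.from_string("rwdl")`` grants read, write,
+ delete and list (see ``from_string`` below; an illustrative combination).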
+ """ + def __init__(self, read=False, write=False, delete=False, + list=False, # pylint: disable=redefined-builtin + add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): + self.read = read + self.write = write + self.delete = delete + self.delete_previous_version = delete_previous_version + self.list = list + self.add = add + self.create = create + self.update = update + self.process = process + self.tag = kwargs.pop('tag', False) + self.filter_by_tags = kwargs.pop('filter_by_tags', False) + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('l' if self.list else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('u' if self.update else '') + + ('p' if self.process else '') + + ('f' if self.filter_by_tags else '') + + ('t' if self.tag else '') + ) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create AccountSasPermissions from a string. + + To specify read, write, delete, etc. permissions you need only to + include the first letter of the word in the string. E.g. for read and write + permissions you would provide a string "rw". + + :param str permission: Specify permissions in + the string with the first letter of the word. + :return: An AccountSasPermissions object + :rtype: ~azure.storage.blob.AccountSasPermissions + """ + p_read = 'r' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_delete_previous_version = 'x' in permission + p_list = 'l' in permission + p_add = 'a' in permission + p_create = 'c' in permission + p_update = 'u' in permission + p_process = 'p' in permission + p_tag = 't' in permission + p_filter_by_tags = 'f' in permission + parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, + list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, + filter_by_tags=p_filter_by_tags) + + return parsed + +class Services(object): + """Specifies the services accessible with the account SAS. + + :param bool blob: + Access for the `~azure.storage.blob.BlobServiceClient` + :param bool queue: + Access for the `~azure.storage.queue.QueueServiceClient` + :param bool fileshare: + Access for the `~azure.storage.fileshare.ShareServiceClient` + """ + + def __init__(self, blob=False, queue=False, fileshare=False): + self.blob = blob + self.queue = queue + self.fileshare = fileshare + self._str = (('b' if self.blob else '') + + ('q' if self.queue else '') + + ('f' if self.fileshare else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, string): + """Create Services from a string. + + To specify blob, queue, or file you need only to + include the first letter of the word in the string. E.g. for blob and queue + you would provide a string "bq". + + :param str string: Specify blob, queue, or file in + in the string with the first letter of the word. + :return: A Services object + :rtype: ~azure.storage.blob.Services + """ + res_blob = 'b' in string + res_queue = 'q' in string + res_file = 'f' in string + + parsed = cls(res_blob, res_queue, res_file) + parsed._str = string # pylint: disable = protected-access + return parsed + + +class UserDelegationKey(object): + """ + Represents a user delegation key, provided to the user by Azure Storage + based on their Azure Active Directory access token. 
+
+ The fields are saved as simple strings since the user does not have to interact with this object;
+ to generate an identity SAS, the user can simply pass it to the right API.
+
+ :ivar str signed_oid:
+ Object ID of this token.
+ :ivar str signed_tid:
+ Tenant ID of the tenant that issued this token.
+ :ivar str signed_start:
+ The datetime this token becomes valid.
+ :ivar str signed_expiry:
+ The datetime this token expires.
+ :ivar str signed_service:
+ What service this key is valid for.
+ :ivar str signed_version:
+ The version identifier of the REST service that created this token.
+ :ivar str value:
+ The user delegation key.
+ """
+ def __init__(self):
+ self.signed_oid = None
+ self.signed_tid = None
+ self.signed_start = None
+ self.signed_expiry = None
+ self.signed_service = None
+ self.signed_version = None
+ self.value = None
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/parser.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/parser.py
new file mode 100644
index 0000000..c6feba8
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/parser.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+if sys.version_info < (3,):
+ def _str(value):
+ if isinstance(value, unicode): # pylint: disable=undefined-variable
+ return value.encode('utf-8')
+
+ return str(value)
+else:
+ _str = str
+
+
+def _to_utc_datetime(value):
+ return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies.py
new file mode 100644
index 0000000..c9bc798
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies.py
@@ -0,0 +1,610 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import re
+import random
+from time import time
+from io import SEEK_SET, UnsupportedOperation
+import logging
+import uuid
+import types
+from typing import Any, TYPE_CHECKING
+from wsgiref.handlers import format_date_time
+try:
+ from urllib.parse import (
+ urlparse,
+ parse_qsl,
+ urlunparse,
+ urlencode,
+ )
+except ImportError:
+ from urllib import urlencode # type: ignore
+ from urlparse import ( # type: ignore
+ urlparse,
+ parse_qsl,
+ urlunparse,
+ )
+
+from azure.core.pipeline.policies import (
+ HeadersPolicy,
+ SansIOHTTPPolicy,
+ NetworkTraceLoggingPolicy,
+ HTTPPolicy,
+ RequestHistory
+)
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+
+from .models import LocationMode
+
+try:
+ _unicode_type = unicode # type: ignore
+except NameError:
+ _unicode_type = str
+
+if TYPE_CHECKING:
+ from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+ if isinstance(data, _unicode_type):
+ data = data.encode('utf-8')
+ encoded = base64.b64encode(data)
+ return encoded.decode('utf-8')
+
+
+def is_exhausted(settings):
+ """Are we out of retries?"""
+ retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+ return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+ if settings['hook']:
+ settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+def is_retry(response, mode):
+ """Is this method/status code retryable? (Based on whitelists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is on the list of status codes to
+ be retried upon on the presence of the aforementioned header)
+ """
+ status = response.http_response.status_code
+ if 300 <= status < 500:
+ # An exception occurred, but in most cases it was expected. Examples could
+ # include a 409 Conflict or 412 Precondition Failed.
+ if status == 404 and mode == LocationMode.SECONDARY:
+ # Response code 404 should be retried if secondary was used.
+ return True
+ if status == 408:
+ # Response code 408 is a timeout and should be retried.
+ return True
+ return False
+ if status >= 500:
+ # Response codes above 500 with the exception of 501 Not Implemented and
+ # 505 Version Not Supported indicate a server issue and should be retried.
+ if status in [501, 505]: + return False + return True + return False + + +def urljoin(base_url, stub_url): + parsed = urlparse(base_url) + parsed = parsed._replace(path=parsed.path + '/' + stub_url) + return parsed.geturl() + + +class QueueMessagePolicy(SansIOHTTPPolicy): + + def on_request(self, request): + message_id = request.context.options.pop('queue_message_id', None) + if message_id: + request.http_request.url = urljoin( + request.http_request.url, + message_id) + + +class StorageHeadersPolicy(HeadersPolicy): + request_id_header_name = 'x-ms-client-request-id' + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + super(StorageHeadersPolicy, self).on_request(request) + current_time = format_date_time(time()) + request.http_request.headers['x-ms-date'] = current_time + + custom_id = request.context.options.pop('client_request_id', None) + request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) + + # def on_response(self, request, response): + # # raise exception if the echoed client request id from the service is not identical to the one we sent + # if self.request_id_header_name in response.http_response.headers: + + # client_request_id = request.http_request.headers.get(self.request_id_header_name) + + # if response.http_response.headers[self.request_id_header_name] != client_request_id: + # raise AzureError( + # "Echoed client request ID: {} does not match sent client request ID: {}. " + # "Service request ID: {}".format( + # response.http_response.headers[self.request_id_header_name], client_request_id, + # response.http_response.headers['x-ms-request-id']), + # response=response.http_response + # ) + + +class StorageHosts(SansIOHTTPPolicy): + + def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument + self.hosts = hosts + super(StorageHosts, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + request.context.options['hosts'] = self.hosts + parsed_url = urlparse(request.http_request.url) + + # Detect what location mode we're currently requesting with + location_mode = LocationMode.PRIMARY + for key, value in self.hosts.items(): + if parsed_url.netloc == value: + location_mode = key + + # See if a specific location mode has been specified, and if so, redirect + use_location = request.context.options.pop('use_location', None) + if use_location: + # Lock retries to the specific location + request.context.options['retry_to_secondary'] = False + if use_location not in self.hosts: + raise ValueError("Attempting to use undefined host location {}".format(use_location)) + if use_location != location_mode: + # Update request URL to use the specified location + updated = parsed_url._replace(netloc=self.hosts[use_location]) + request.http_request.url = updated.geturl() + location_mode = use_location + + request.context.options['location_mode'] = location_mode + + +class StorageLoggingPolicy(NetworkTraceLoggingPolicy): + """A policy that logs HTTP request and response to the DEBUG logger. 
+ + This accepts both global configuration, and per-request level with "enable_http_logger" + """ + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + http_request = request.http_request + options = request.context.options + if options.pop("logging_enable", self.enable_http_logger): + request.context["logging_enable"] = True + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + log_url = http_request.url + query_params = http_request.query + if 'sig' in query_params: + log_url = log_url.replace(query_params['sig'], "sig=*****") + _LOGGER.debug("Request URL: %r", log_url) + _LOGGER.debug("Request method: %r", http_request.method) + _LOGGER.debug("Request headers:") + for header, value in http_request.headers.items(): + if header.lower() == 'authorization': + value = '*****' + elif header.lower() == 'x-ms-copy-source' and 'sig' in value: + # take the url apart and scrub away the signed signature + scheme, netloc, path, params, query, fragment = urlparse(value) + parsed_qs = dict(parse_qsl(query)) + parsed_qs['sig'] = '*****' + + # the SAS needs to be put back together + value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) + + _LOGGER.debug(" %r: %r", header, value) + _LOGGER.debug("Request body:") + + # We don't want to log the binary data of a file upload. + if isinstance(http_request.body, types.GeneratorType): + _LOGGER.debug("File upload") + else: + _LOGGER.debug(str(http_request.body)) + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log request: %r", err) + + def on_response(self, request, response): + # type: (PipelineRequest, PipelineResponse, Any) -> None + if response.context.pop("logging_enable", self.enable_http_logger): + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + _LOGGER.debug("Response status: %r", response.http_response.status_code) + _LOGGER.debug("Response headers:") + for res_header, value in response.http_response.headers.items(): + _LOGGER.debug(" %r: %r", res_header, value) + + # We don't want to log binary data if the response is a file. 
+ _LOGGER.debug("Response content:") + pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) + header = response.http_response.headers.get('content-disposition') + + if header and pattern.match(header): + filename = header.partition('=')[2] + _LOGGER.debug("File attachments: %s", filename) + elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): + _LOGGER.debug("Body contains binary data.") + elif response.http_response.headers.get("content-type", "").startswith("image"): + _LOGGER.debug("Body contains image data.") + else: + if response.context.options.get('stream', False): + _LOGGER.debug("Body is streamable") + else: + _LOGGER.debug(response.http_response.text()) + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log response: %s", repr(err)) + + +class StorageRequestHook(SansIOHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._request_callback = kwargs.get('raw_request_hook') + super(StorageRequestHook, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, **Any) -> PipelineResponse + request_callback = request.context.options.pop('raw_request_hook', self._request_callback) + if request_callback: + request_callback(request) + + +class StorageResponseHook(HTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(StorageResponseHook, self).__init__() + + def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = self.next.send(request) + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + response_callback(response) + request.context['response_callback'] = response_callback + return response + + +class StorageContentValidation(SansIOHTTPPolicy): + """A simple policy that sends the given headers + with the request. + + This will overwrite any headers already defined in the request. 
+ """ + header_name = 'Content-MD5' + + def __init__(self, **kwargs): # pylint: disable=unused-argument + super(StorageContentValidation, self).__init__() + + @staticmethod + def get_content_md5(data): + md5 = hashlib.md5() # nosec + if isinstance(data, bytes): + md5.update(data) + elif hasattr(data, 'read'): + pos = 0 + try: + pos = data.tell() + except: # pylint: disable=bare-except + pass + for chunk in iter(lambda: data.read(4096), b""): + md5.update(chunk) + try: + data.seek(pos, SEEK_SET) + except (AttributeError, IOError): + raise ValueError("Data should be bytes or a seekable file-like object.") + else: + raise ValueError("Data should be bytes or a seekable file-like object.") + + return md5.digest() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + validate_content = request.context.options.pop('validate_content', False) + if validate_content and request.http_request.method != 'GET': + computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) + request.http_request.headers[self.header_name] = computed_md5 + request.context['validate_content_md5'] = computed_md5 + request.context['validate_content'] = validate_content + + def on_response(self, request, response): + if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): + computed_md5 = request.context.get('validate_content_md5') or \ + encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) + if response.http_response.headers['content-md5'] != computed_md5: + raise AzureError( + 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( + response.http_response.headers['content-md5'], computed_md5), + response=response.http_response + ) + + +class StorageRetryPolicy(HTTPPolicy): + """ + The base class for Exponential and Linear retries containing shared code. + """ + + def __init__(self, **kwargs): + self.total_retries = kwargs.pop('retry_total', 10) + self.connect_retries = kwargs.pop('retry_connect', 3) + self.read_retries = kwargs.pop('retry_read', 3) + self.status_retries = kwargs.pop('retry_status', 3) + self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) + super(StorageRetryPolicy, self).__init__() + + def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use + """ + A function which sets the next host location on the request, if applicable. + + :param ~azure.storage.models.RetryContext context: + The retry context containing the previous host location and the request + to evaluate and possibly modify. 
+ """ + if settings['hosts'] and all(settings['hosts'].values()): + url = urlparse(request.url) + # If there's more than one possible location, retry to the alternative + if settings['mode'] == LocationMode.PRIMARY: + settings['mode'] = LocationMode.SECONDARY + else: + settings['mode'] = LocationMode.PRIMARY + updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) + request.url = updated.geturl() + + def configure_retries(self, request): # pylint: disable=no-self-use + body_position = None + if hasattr(request.http_request.body, 'read'): + try: + body_position = request.http_request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + options = request.context.options + return { + 'total': options.pop("retry_total", self.total_retries), + 'connect': options.pop("retry_connect", self.connect_retries), + 'read': options.pop("retry_read", self.read_retries), + 'status': options.pop("retry_status", self.status_retries), + 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), + 'mode': options.pop("location_mode", LocationMode.PRIMARY), + 'hosts': options.pop("hosts", None), + 'hook': options.pop("retry_hook", None), + 'body_position': body_position, + 'count': 0, + 'history': [] + } + + def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use + """ Formula for computing the current backoff. + Should be calculated by child class. + + :rtype: float + """ + return 0 + + def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + + def increment(self, settings, request, response=None, error=None): + """Increment the retry counters. + + :param response: A pipeline response object. + :param error: An error encountered during the request, or + None if the response was received successfully. + + :return: Whether the retry attempts are exhausted. + """ + settings['total'] -= 1 + + if error and isinstance(error, ServiceRequestError): + # Errors when we're fairly sure that the server did not receive the + # request, so it should be safe to retry. + settings['connect'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + elif error and isinstance(error, ServiceResponseError): + # Errors that occur after the request has been started, so we should + # assume that the server began processing it. 
+ settings['read'] -= 1
+ settings['history'].append(RequestHistory(request, error=error))
+
+ else:
+ # Incrementing because of a server error like a 500 in
+ # status_forcelist and the given method is in the whitelist
+ if response:
+ settings['status'] -= 1
+ settings['history'].append(RequestHistory(request, http_response=response))
+
+ if not is_exhausted(settings):
+ if request.method not in ['PUT'] and settings['retry_secondary']:
+ self._set_next_host_location(settings, request)
+
+ # rewind the request body if it is a stream
+ if request.body and hasattr(request.body, 'read'):
+ # if no position was saved, retry will not work
+ if settings['body_position'] is None:
+ return False
+ try:
+ # attempt to rewind the body to the initial position
+ request.body.seek(settings['body_position'], SEEK_SET)
+ except (UnsupportedOperation, ValueError):
+ # if the body is not seekable, retry will not work
+ return False
+ settings['count'] += 1
+ return True
+ return False
+
+ def send(self, request):
+ retries_remaining = True
+ response = None
+ retry_settings = self.configure_retries(request)
+ while retries_remaining:
+ try:
+ response = self.next.send(request)
+ if is_retry(response, retry_settings['mode']):
+ retries_remaining = self.increment(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response)
+ if retries_remaining:
+ retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response,
+ error=None)
+ self.sleep(retry_settings, request.context.transport)
+ continue
+ break
+ except AzureError as err:
+ retries_remaining = self.increment(
+ retry_settings, request=request.http_request, error=err)
+ if retries_remaining:
+ retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=None,
+ error=err)
+ self.sleep(retry_settings, request.context.transport)
+ continue
+ raise err
+ if retry_settings['history']:
+ response.context['history'] = retry_settings['history']
+ response.http_response.location_mode = retry_settings['mode']
+ return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+ """Exponential retry."""
+
+ def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+ retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ '''
+ Constructs an Exponential retry object. The initial_backoff is used for
+ the first retry. Subsequent retries are retried after initial_backoff +
+ increment_base^retry_count seconds. For example, by default the first retry
+ occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+ third after (15+3^2) = 24 seconds.
+
+ :param int initial_backoff:
+ The initial backoff interval, in seconds, for the first retry.
+ :param int increment_base:
+ The base, in seconds, to increment the initial_backoff by after the
+ first retry.
+ :param int retry_total:
+ The maximum number of retry attempts.
+ :param bool retry_to_secondary:
+ Whether the request should be retried to secondary, if able. This should
+ only be enabled if RA-GRS accounts are used and potentially stale data
+ can be handled.
+ :param int random_jitter_range:
+ A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+ For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
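+
+ A minimal usage sketch (``account_url`` and ``credential`` are placeholders;
+ clients built on this module accept a custom instance via the ``retry_policy``
+ keyword)::
+
+ retry = ExponentialRetry(initial_backoff=10, increment_base=2, retry_total=5)
+ client = BlobServiceClient(account_url, credential=credential, retry_policy=retry)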
+ '''
+ self.initial_backoff = initial_backoff
+ self.increment_base = increment_base
+ self.random_jitter_range = random_jitter_range
+ super(ExponentialRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+ A number of seconds indicating how long to wait before retrying the request,
+ or None to indicate no retry should be performed.
+ :rtype: float or None
+ """
+ random_generator = random.Random()
+ backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+ random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+ random_range_end = backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(StorageRetryPolicy):
+ """Linear retry."""
+
+ def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ """
+ Constructs a Linear retry object.
+
+ :param int backoff:
+ The backoff interval, in seconds, between retries.
+ :param int retry_total:
+ The maximum number of retry attempts.
+ :param bool retry_to_secondary:
+ Whether the request should be retried to secondary, if able. This should
+ only be enabled if RA-GRS accounts are used and potentially stale data
+ can be handled.
+ :param int random_jitter_range:
+ A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+ For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+ """
+ self.backoff = backoff
+ self.random_jitter_range = random_jitter_range
+ super(LinearRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+ A number of seconds indicating how long to wait before retrying the request,
+ or None to indicate no retry should be performed.
+ :rtype: float or None
+ """
+ random_generator = random.Random()
+ # the backoff interval normally does not change, however there is the possibility
+ # that it was modified by accessing the property directly after initializing the object
+ random_range_start = self.backoff - self.random_jitter_range \
+ if self.backoff > self.random_jitter_range else 0
+ random_range_end = self.backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies_async.py
new file mode 100644
index 0000000..e0926b8
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/policies_async.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import asyncio +import random +import logging +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline.policies import AsyncHTTPPolicy +from azure.core.exceptions import AzureError + +from .policies import is_retry, StorageRetryPolicy + +if TYPE_CHECKING: + from azure.core.pipeline import PipelineRequest, PipelineResponse + + +_LOGGER = logging.getLogger(__name__) + + +async def retry_hook(settings, **kwargs): + if settings['hook']: + if asyncio.iscoroutine(settings['hook']): + await settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + else: + settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + + +class AsyncStorageResponseHook(AsyncHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(AsyncStorageResponseHook, self).__init__() + + async def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = await self.next.send(request) + await response.http_response.load_body() + + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + if asyncio.iscoroutine(response_callback): + await response_callback(response) + else: + response_callback(response) + request.context['response_callback'] = response_callback + return response + +class AsyncStorageRetryPolicy(StorageRetryPolicy): + """ + The base class for Exponential and Linear retries containing shared code. 
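+
+ Overrides ``sleep`` and ``send`` as coroutines. Note that, per the module's
+ ``retry_hook`` helper above, user-supplied retry hooks may be either plain
+ callables or coroutines.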
+ """ + + async def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + await transport.sleep(backoff) + + async def send(self, request): + retries_remaining = True + response = None + retry_settings = self.configure_retries(request) + while retries_remaining: + try: + response = await self.next.send(request) + if is_retry(response, retry_settings['mode']): + retries_remaining = self.increment( + retry_settings, + request=request.http_request, + response=response.http_response) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=response.http_response, + error=None) + await self.sleep(retry_settings, request.context.transport) + continue + break + except AzureError as err: + retries_remaining = self.increment( + retry_settings, request=request.http_request, error=err) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=None, + error=err) + await self.sleep(retry_settings, request.context.transport) + continue + raise err + if retry_settings['history']: + response.context['history'] = retry_settings['history'] + response.http_response.location_mode = retry_settings['mode'] + return response + + +class ExponentialRetry(AsyncStorageRetryPolicy): + """Exponential retry.""" + + def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, + retry_to_secondary=False, random_jitter_range=3, **kwargs): + ''' + Constructs an Exponential retry object. The initial_backoff is used for + the first retry. Subsequent retries are retried after initial_backoff + + increment_power^retry_count seconds. For example, by default the first retry + occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the + third after (15+3^2) = 24 seconds. + + :param int initial_backoff: + The initial backoff interval, in seconds, for the first retry. + :param int increment_base: + The base, in seconds, to increment the initial_backoff by after the + first retry. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + ''' + self.initial_backoff = initial_backoff + self.increment_base = increment_base + self.random_jitter_range = random_jitter_range + super(ExponentialRetry, self).__init__( + retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) + + def get_backoff_time(self, settings): + """ + Calculates how long to sleep before retrying. + + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. 
+ :rtype: float or None
+ """
+ random_generator = random.Random()
+ backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+ random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+ random_range_end = backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+ """Linear retry."""
+
+ def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ """
+ Constructs a Linear retry object.
+
+ :param int backoff:
+ The backoff interval, in seconds, between retries.
+ :param int retry_total:
+ The maximum number of retry attempts.
+ :param bool retry_to_secondary:
+ Whether the request should be retried to secondary, if able. This should
+ only be enabled if RA-GRS accounts are used and potentially stale data
+ can be handled.
+ :param int random_jitter_range:
+ A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+ For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+ """
+ self.backoff = backoff
+ self.random_jitter_range = random_jitter_range
+ super(LinearRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+ A number of seconds indicating how long to wait before retrying the request,
+ or None to indicate no retry should be performed.
+ :rtype: float or None
+ """
+ random_generator = random.Random()
+ # the backoff interval normally does not change, however there is the possibility
+ # that it was modified by accessing the property directly after initializing the object
+ random_range_start = self.backoff - self.random_jitter_range \
+ if self.backoff > self.random_jitter_range else 0
+ random_range_end = self.backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/request_handlers.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/request_handlers.py
new file mode 100644
index 0000000..37354d7
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/request_handlers.py
@@ -0,0 +1,273 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+ TYPE_CHECKING
+)
+
+import logging
+from os import fstat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+
+import isodate
+
+from azure.core.exceptions import raise_with_traceback
+
+
+_LOGGER = logging.getLogger(__name__)
+
+_REQUEST_DELIMITER_PREFIX = "batch_"
+_HTTP1_1_IDENTIFIER = "HTTP/1.1"
+_HTTP_LINE_ENDING = "\r\n"
+
+
+def serialize_iso(attr):
+ """Serialize Datetime object into ISO-8601 formatted string.
+
+ :param Datetime attr: Object to be serialized.
+ :rtype: str
+ :raises: ValueError if format invalid.
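+
+ Example (illustrative): a timezone-naive ``datetime(2021, 1, 1, 12, 30)``
+ serializes to ``'2021-01-01T12:30:00Z'``.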
+ """ + if not attr: + return None + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, + utc.tm_hour, utc.tm_min, utc.tm_sec) + return date + 'Z' + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(ValueError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + +def get_length(data): + length = None + # Check if object implements the __len__ method, covers most input cases such as bytearray. + try: + length = len(data) + except: # pylint: disable=bare-except + pass + + if not length: + # Check if the stream is a file-like stream object. + # If so, calculate the size using the file descriptor. + try: + fileno = data.fileno() + except (AttributeError, UnsupportedOperation): + pass + else: + try: + return fstat(fileno).st_size + except OSError: + # Not a valid fileno, may be possible requests returned + # a socket number? + pass + + # If the stream is seekable and tell() is implemented, calculate the stream size. + try: + current_position = data.tell() + data.seek(0, SEEK_END) + length = data.tell() - current_position + data.seek(current_position, SEEK_SET) + except (AttributeError, UnsupportedOperation): + pass + + return length + + +def read_length(data): + try: + if hasattr(data, 'read'): + read_data = b'' + for chunk in iter(lambda: data.read(4096), b""): + read_data += chunk + return len(read_data), read_data + if hasattr(data, '__iter__'): + read_data = b'' + for chunk in data: + read_data += chunk + return len(read_data), read_data + except: # pylint: disable=bare-except + pass + raise ValueError("Unable to calculate content length, please specify.") + + +def validate_and_format_range_headers( + start_range, end_range, start_range_required=True, + end_range_required=True, check_content_md5=False, align_to_page=False): + # If end range is provided, start range must be provided + if (start_range_required or end_range is not None) and start_range is None: + raise ValueError("start_range value cannot be None.") + if end_range_required and end_range is None: + raise ValueError("end_range value cannot be None.") + + # Page ranges must be 512 aligned + if align_to_page: + if start_range is not None and start_range % 512 != 0: + raise ValueError("Invalid page blob start_range: {0}. " + "The size must be aligned to a 512-byte boundary.".format(start_range)) + if end_range is not None and end_range % 512 != 511: + raise ValueError("Invalid page blob end_range: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(end_range)) + + # Format based on whether end_range is present + range_header = None + if end_range is not None: + range_header = 'bytes={0}-{1}'.format(start_range, end_range) + elif start_range is not None: + range_header = "bytes={0}-".format(start_range) + + # Content MD5 can only be provided for a complete range less than 4MB in size + range_validation = None + if check_content_md5: + if start_range is None or end_range is None: + raise ValueError("Both start and end range requied for MD5 content validation.") + if end_range - start_range > 4 * 1024 * 1024: + raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") + range_validation = 'true' + + return range_header, range_validation + + +def add_metadata_headers(metadata=None): + # type: (Optional[Dict[str, str]]) -> Dict[str, str] + headers = {} + if metadata: + for key, value in metadata.items(): + headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value + return headers + + +def serialize_batch_body(requests, batch_id): + """ + -- + + -- + (repeated as needed) + ---- + + Serializes the requests in this batch to a single HTTP mixed/multipart body. + + :param list[~azure.core.pipeline.transport.HttpRequest] requests: + a list of sub-request for the batch request + :param str batch_id: + to be embedded in batch sub-request delimiter + :return: The body bytes for this batch. + """ + + if requests is None or len(requests) == 0: + raise ValueError('Please provide sub-request(s) for this batch request') + + delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') + newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') + batch_body = list() + + content_index = 0 + for request in requests: + request.headers.update({ + "Content-ID": str(content_index), + "Content-Length": str(0) + }) + batch_body.append(delimiter_bytes) + batch_body.append(_make_body_from_sub_request(request)) + batch_body.append(newline_bytes) + content_index += 1 + + batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) + # final line of body MUST have \r\n at the end, or it will not be properly read by the service + batch_body.append(newline_bytes) + + return bytes().join(batch_body) + + +def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): + """ + Gets the delimiter used for this batch request's mixed/multipart HTTP format. + + :param str batch_id: + Randomly generated id + :param bool is_prepend_dashes: + Whether to include the starting dashes. Used in the body, but non on defining the delimiter. + :param bool is_append_dashes: + Whether to include the ending dashes. Used in the body on the closing delimiter only. + :return: The delimiter, WITHOUT a trailing newline. + """ + + prepend_dashes = '--' if is_prepend_dashes else '' + append_dashes = '--' if is_append_dashes else '' + + return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes + + +def _make_body_from_sub_request(sub_request): + """ + Content-Type: application/http + Content-ID: + Content-Transfer-Encoding: (if present) + + HTTP/ +
+    <header name>: <header value>    (repeated as necessary)
+    Content-Length: <content length>
+    (newline if content length > 0)
+    <body> (if content length > 0)
+
+    Serializes an HTTP request.
+
+    :param ~azure.core.pipeline.transport.HttpRequest sub_request:
+        Request to serialize.
+    :return: The serialized sub-request in bytes
+    """
+
+    # put the sub-request's headers into a list for efficient str concatenation
+    sub_request_body = list()
+
+    # get headers for ease of manipulation; remove headers as they are used
+    headers = sub_request.headers
+
+    # append opening headers
+    sub_request_body.append("Content-Type: application/http")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-ID: ")
+    sub_request_body.append(headers.pop("Content-ID", ""))
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-Transfer-Encoding: binary")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append HTTP verb and path and query and HTTP version
+    sub_request_body.append(sub_request.method)
+    sub_request_body.append(' ')
+    sub_request_body.append(sub_request.url)
+    sub_request_body.append(' ')
+    sub_request_body.append(_HTTP1_1_IDENTIFIER)
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append remaining headers (this will set the Content-Length, as it was set on `sub-request`)
+    for header_name, header_value in headers.items():
+        if header_value is not None:
+            sub_request_body.append(header_name)
+            sub_request_body.append(": ")
+            sub_request_body.append(header_value)
+            sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    return ''.join(sub_request_body).encode()
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/response_handlers.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/response_handlers.py
new file mode 100644
index 0000000..4b591dd
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/response_handlers.py
@@ -0,0 +1,162 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+import logging
+
+from azure.core.pipeline.policies import ContentDecodePolicy
+from azure.core.exceptions import (
+    HttpResponseError,
+    ResourceNotFoundError,
+    ResourceModifiedError,
+    ResourceExistsError,
+    ClientAuthenticationError,
+    DecodeError)
+
+from .parser import _to_utc_datetime
+from .models import StorageErrorCode, UserDelegationKey, get_enum_value
+
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from azure.core.exceptions import AzureError
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class PartialBatchErrorException(HttpResponseError):
+    """There is a partial failure in batch operations.
+
+    :param str message: The message of the exception.
+    :param response: Server response to be deserialized.
+    :param list parts: A list of the parts in multipart response.
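+
+    For example (a sketch of typical handling; the batch call shown is
+    ContainerClient.delete_blobs, which raises this exception when only
+    some sub-requests fail)::
+
+        try:
+            container_client.delete_blobs(*blob_names)
+        except PartialBatchErrorException as error:
+            for part in error.parts:  # one pipeline response per sub-request
+                if part.status_code not in (202, 404):
+                    print(part.status_code, part.reason)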
+ """ + + def __init__(self, message, response, parts): + self.parts = parts + super(PartialBatchErrorException, self).__init__(message=message, response=response) + + +def parse_length_from_content_range(content_range): + ''' + Parses the blob length from the content range header: bytes 1-3/65537 + ''' + if content_range is None: + return None + + # First, split in space and take the second half: '1-3/65537' + # Next, split on slash and take the second half: '65537' + # Finally, convert to an int: 65537 + return int(content_range.split(' ', 1)[1].split('/', 1)[1]) + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = get_enum_value(value) + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers) + + +def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers), deserialized + + +def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return response.http_response.location_mode, deserialized + + +def process_storage_error(storage_error): + # If storage_error is one of the two then it has already been processed and serialized to the specific exception. + if isinstance(storage_error, (PartialBatchErrorException, ClientAuthenticationError)): + raise storage_error + raise_error = HttpResponseError + error_code = storage_error.response.headers.get('x-ms-error-code') + error_message = storage_error.message + additional_data = {} + try: + error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) + if error_body: + for info in error_body.iter(): + if info.tag.lower() == 'code': + error_code = info.text + elif info.tag.lower() == 'message': + error_message = info.text + else: + additional_data[info.tag] = info.text + except DecodeError: + pass + + try: + if error_code: + error_code = StorageErrorCode(error_code) + if error_code in [StorageErrorCode.condition_not_met, + StorageErrorCode.blob_overwritten]: + raise_error = ResourceModifiedError + if error_code in [StorageErrorCode.invalid_authentication_info, + StorageErrorCode.authentication_failed]: + raise_error = ClientAuthenticationError + if error_code in [StorageErrorCode.resource_not_found, + StorageErrorCode.cannot_verify_copy_source, + StorageErrorCode.blob_not_found, + StorageErrorCode.queue_not_found, + StorageErrorCode.container_not_found, + StorageErrorCode.parent_not_found, + StorageErrorCode.share_not_found]: + raise_error = ResourceNotFoundError + if error_code in [StorageErrorCode.account_already_exists, + StorageErrorCode.account_being_created, + StorageErrorCode.resource_already_exists, + StorageErrorCode.resource_type_mismatch, + StorageErrorCode.blob_already_exists, + StorageErrorCode.queue_already_exists, + StorageErrorCode.container_already_exists, + StorageErrorCode.container_being_deleted, + StorageErrorCode.queue_being_deleted, + StorageErrorCode.share_already_exists, + StorageErrorCode.share_being_deleted]: + raise_error = ResourceExistsError + except 
ValueError: + # Got an unknown error code + pass + + try: + error_message += "\nErrorCode:{}".format(error_code.value) + except AttributeError: + error_message += "\nErrorCode:{}".format(error_code) + for name, info in additional_data.items(): + error_message += "\n{}:{}".format(name, info) + + error = raise_error(message=error_message, response=storage_error.response) + error.error_code = error_code + error.additional_info = additional_data + error.raise_with_traceback() + + +def parse_to_internal_user_delegation_key(service_user_delegation_key): + internal_user_delegation_key = UserDelegationKey() + internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid + internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid + internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) + internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) + internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service + internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version + internal_user_delegation_key.value = service_user_delegation_key.value + return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/shared_access_signature.py new file mode 100644 index 0000000..07aad5f --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/shared_access_signature.py @@ -0,0 +1,220 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import date + +from .parser import _str, _to_utc_datetime +from .constants import X_MS_VERSION +from . 
import sign_string, url_quote
+
+
+class QueryStringConstants(object):
+    SIGNED_SIGNATURE = 'sig'
+    SIGNED_PERMISSION = 'sp'
+    SIGNED_START = 'st'
+    SIGNED_EXPIRY = 'se'
+    SIGNED_RESOURCE = 'sr'
+    SIGNED_IDENTIFIER = 'si'
+    SIGNED_IP = 'sip'
+    SIGNED_PROTOCOL = 'spr'
+    SIGNED_VERSION = 'sv'
+    SIGNED_CACHE_CONTROL = 'rscc'
+    SIGNED_CONTENT_DISPOSITION = 'rscd'
+    SIGNED_CONTENT_ENCODING = 'rsce'
+    SIGNED_CONTENT_LANGUAGE = 'rscl'
+    SIGNED_CONTENT_TYPE = 'rsct'
+    START_PK = 'spk'
+    START_RK = 'srk'
+    END_PK = 'epk'
+    END_RK = 'erk'
+    SIGNED_RESOURCE_TYPES = 'srt'
+    SIGNED_SERVICES = 'ss'
+    SIGNED_OID = 'skoid'
+    SIGNED_TID = 'sktid'
+    SIGNED_KEY_START = 'skt'
+    SIGNED_KEY_EXPIRY = 'ske'
+    SIGNED_KEY_SERVICE = 'sks'
+    SIGNED_KEY_VERSION = 'skv'
+
+    # for ADLS
+    SIGNED_AUTHORIZED_OID = 'saoid'
+    SIGNED_UNAUTHORIZED_OID = 'suoid'
+    SIGNED_CORRELATION_ID = 'scid'
+    SIGNED_DIRECTORY_DEPTH = 'sdd'
+
+    @staticmethod
+    def to_list():
+        return [
+            QueryStringConstants.SIGNED_SIGNATURE,
+            QueryStringConstants.SIGNED_PERMISSION,
+            QueryStringConstants.SIGNED_START,
+            QueryStringConstants.SIGNED_EXPIRY,
+            QueryStringConstants.SIGNED_RESOURCE,
+            QueryStringConstants.SIGNED_IDENTIFIER,
+            QueryStringConstants.SIGNED_IP,
+            QueryStringConstants.SIGNED_PROTOCOL,
+            QueryStringConstants.SIGNED_VERSION,
+            QueryStringConstants.SIGNED_CACHE_CONTROL,
+            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
+            QueryStringConstants.SIGNED_CONTENT_ENCODING,
+            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
+            QueryStringConstants.SIGNED_CONTENT_TYPE,
+            QueryStringConstants.START_PK,
+            QueryStringConstants.START_RK,
+            QueryStringConstants.END_PK,
+            QueryStringConstants.END_RK,
+            QueryStringConstants.SIGNED_RESOURCE_TYPES,
+            QueryStringConstants.SIGNED_SERVICES,
+            QueryStringConstants.SIGNED_OID,
+            QueryStringConstants.SIGNED_TID,
+            QueryStringConstants.SIGNED_KEY_START,
+            QueryStringConstants.SIGNED_KEY_EXPIRY,
+            QueryStringConstants.SIGNED_KEY_SERVICE,
+            QueryStringConstants.SIGNED_KEY_VERSION,
+            # for ADLS
+            QueryStringConstants.SIGNED_AUTHORIZED_OID,
+            QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+            QueryStringConstants.SIGNED_CORRELATION_ID,
+            QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(self, services, resource_types, permission, expiry, start=None,
+                         ip=None, protocol=None):
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature with the sas_token parameter of the service
+        or to create a new account object.
+
+        :param Services services:
+            Specifies the services (blob, queue, file, table) accessible with
+            the account SAS.
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature.
The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. You can combine + values to provide more than one permission. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _str(val) if val is not None else None + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(QueryStringConstants.SIGNED_START, start) + self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(QueryStringConstants.SIGNED_IP, ip) + self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) + + def add_id(self, policy_id): + self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) + + def add_account(self, services, resource_types): + self._add_query(QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, 
content_language) + self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION)) + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + def get_token(self): + return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads.py new file mode 100644 index 0000000..941a90f --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads.py @@ -0,0 +1,603 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from concurrent import futures +from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) +from threading import Lock +from itertools import islice +from math import ceil + +import six + +from azure.core.tracing.common import with_current_context + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
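+
+# A note on the scheduling pattern used by _parallel_uploads below: it keeps at
+# most max_concurrency chunk uploads in flight and tops the window back up as
+# each future completes. A standalone sketch of the same bounded-window idea
+# (names are illustrative, not part of this module; `tasks` is an iterator):
+#
+#     running = {executor.submit(work, t) for t in islice(tasks, window)}
+#     while running:
+#         done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
+#         results.extend(f.result() for f in done)
+#         for _ in done:
+#             try:
+#                 running.add(executor.submit(work, next(tasks)))
+#             except StopIteration:
+#                 break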
+ + +def _parallel_uploads(executor, uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(executor.submit(with_current_context(uploader), next_chunk)) + except StopIteration: + break + + # Wait for the remaining uploads to finish + done, _running = futures.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + validate_content=None, + encryption_options=None, + **kwargs): + + if encryption_options: + encryptor, padder = get_blob_encryptor_and_padder( + encryption_options.get('cek'), + encryption_options.get('vector'), + uploader_class is not PageBlobChunkUploader) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + validate_content=validate_content, + **kwargs) + if parallel: + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + executor.submit(with_current_context(uploader.process_chunk), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + executor.submit(with_current_context(uploader.process_substream_block), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] + if any(range_ids): + return sorted(range_ids) + return [] + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = Lock() if 
parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b"" + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError("Blob data should be of type bytes.") + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b"" or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + def _update_progress(self, length): + if self.progress_lock is not None: + with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = self._upload_chunk(chunk_offset, chunk_data) + self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + def process_substream_block(self, block_data): + return self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + def _upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_substream_block_with_progress(self, index, block_stream): + range_id = self._upload_substream_block(index, block_stream) + self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop("modified_access_conditions", None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. 
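+        # Note: the chunk offset is zero-padded to a fixed 32 digits and then
+        # base64-encoded (twice, with URL-quoting in between) so that every
+        # block id in the blob has the same length, which the service requires.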
+ index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return index, block_id + + def _upload_substream_block(self, index, block_stream): + try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) + self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + return not any(bytearray(chunk_data)) + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + self.current_length = int(self.response_headers["blob_append_offset"]) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + self.response_headers = self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): 
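+        # Append the block directly from its SubStream window so large files
+        # need not be buffered whole in memory; the window is always closed
+        # once the service call returns.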
+        try:
+            self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
+
+    # TODO: Implement this method.
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class SubStream(IOBase):
+
+    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
+        # Python 2.7: file-like objects created with open() typically support seek(), but are not
+        # derivations of io.IOBase and thus do not implement seekable().
+        # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
+        try:
+            # only the main thread runs this, so there's no need to grab the lock
+            wrapped_stream.seek(0, SEEK_CUR)
+        except:
+            raise ValueError("Wrapped stream must support seek().")
+
+        self._lock = lockObj
+        self._wrapped_stream = wrapped_stream
+        self._position = 0
+        self._stream_begin_index = stream_begin_index
+        self._length = length
+        self._buffer = BytesIO()
+
+        # we must avoid buffering more than necessary, and also not use up too much memory
+        # so the max buffer size is capped at 4MB
+        self._max_buffer_size = (
+            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+        )
+        self._current_buffer_start = 0
+        self._current_buffer_size = 0
+        super(SubStream, self).__init__()
+
+    def __len__(self):
+        return self._length
+
+    def close(self):
+        if self._buffer:
+            self._buffer.close()
+        self._wrapped_stream = None
+        IOBase.close(self)
+
+    def fileno(self):
+        return self._wrapped_stream.fileno()
+
+    def flush(self):
+        pass
+
+    def read(self, size=None):
+        if self.closed:  # pylint: disable=using-constant-test
+            raise ValueError("Stream is closed.")
+
+        if size is None:
+            size = self._length - self._position
+
+        # adjust if out of bounds
+        if size + self._position >= self._length:
+            size = self._length - self._position
+
+        # return fast
+        if size == 0 or self._buffer.closed:
+            return b""
+
+        # attempt first read from the read buffer and update position
+        read_buffer = self._buffer.read(size)
+        bytes_read = len(read_buffer)
+        bytes_remaining = size - bytes_read
+        self._position += bytes_read
+
+        # repopulate the read buffer from the underlying stream to fulfill the request
+        # ensure the seek and read operations are done atomically (only if a lock is provided)
+        if bytes_remaining > 0:
+            with self._buffer:
+                # either read in the max buffer size specified on the class
+                # or read in just enough data for the current block/sub stream
+                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+                # lock is only defined if max_concurrency > 1 (parallel uploads)
+                if self._lock:
+                    with self._lock:
+                        # reposition the underlying stream to match the start of the data to read
+                        absolute_position = self._stream_begin_index + self._position
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+                        # If we can't seek to the right location, our read will be
corrupted so fail fast. + if self._wrapped_stream.tell() != absolute_position: + raise IOError("Stream failed to seek to the desired location.") + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + else: + absolute_position = self._stream_begin_index + self._position + # It's possible that there's connection problem during data transfer, + # so when we retry we don't want to read from current position of wrapped stream, + # instead we should seek to where we want to read from. + if self._wrapped_stream.tell() != absolute_position: + self._wrapped_stream.seek(absolute_position, SEEK_SET) + + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + + if buffer_from_stream: + # update the buffer with new data from the wrapped stream + # we need to note down the start position and size of the buffer, in case seek is performed later + self._buffer = BytesIO(buffer_from_stream) + self._current_buffer_start = self._position + self._current_buffer_size = len(buffer_from_stream) + + # read the remaining bytes from the new buffer and update position + second_read_buffer = self._buffer.read(bytes_remaining) + read_buffer += second_read_buffer + self._position += len(second_read_buffer) + + return read_buffer + + def readable(self): + return True + + def readinto(self, b): + raise UnsupportedOperation + + def seek(self, offset, whence=0): + if whence is SEEK_SET: + start_index = 0 + elif whence is SEEK_CUR: + start_index = self._position + elif whence is SEEK_END: + start_index = self._length + offset = -offset + else: + raise ValueError("Invalid argument for the 'whence' parameter.") + + pos = start_index + offset + + if pos > self._length: + pos = self._length + elif pos < 0: + pos = 0 + + # check if buffer is still valid + # if not, drop buffer + if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: + self._buffer.close() + self._buffer = BytesIO() + else: # if yes seek to correct position + delta = pos - self._current_buffer_start + self._buffer.seek(delta, SEEK_SET) + + self._position = pos + return pos + + def seekable(self): + return True + + def tell(self): + return self._position + + def write(self): + raise UnsupportedOperation + + def writelines(self): + raise UnsupportedOperation + + def writeable(self): + return False + + +class IterStreamer(object): + """ + File-like streaming iterator. + """ + + def __init__(self, generator, encoding="UTF-8"): + self.generator = generator + self.iterator = iter(generator) + self.leftover = b"" + self.encoding = encoding + + def __len__(self): + return self.generator.__len__() + + def __iter__(self): + return self.iterator + + def seekable(self): + return False + + def __next__(self): + return next(self.iterator) + + next = __next__ # Python 2 compatibility. + + def tell(self, *args, **kwargs): + raise UnsupportedOperation("Data generator does not support tell.") + + def seek(self, *args, **kwargs): + raise UnsupportedOperation("Data generator is unseekable.") + + def read(self, size): + data = self.leftover + count = len(self.leftover) + try: + while count < size: + chunk = self.__next__() + if isinstance(chunk, six.text_type): + chunk = chunk.encode(self.encoding) + data += chunk + count += len(chunk) + # This means count < size and what's leftover will be returned in this call. 
+ except StopIteration: + self.leftover = b"" + + if count >= size: + self.leftover = data[size:] + + return data[:size] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads_async.py new file mode 100644 index 0000000..5ed192b --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared/uploads_async.py @@ -0,0 +1,395 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +import asyncio +from asyncio import Lock +from itertools import islice +import threading + +from math import ceil + +import six + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder +from .uploads import SubStream, IterStreamer # pylint: disable=unused-import + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' + + +async def _parallel_uploads(uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(asyncio.ensure_future(uploader(next_chunk))) + except StopIteration: + break + + # Wait for the remaining uploads to finish + if running: + done, _running = await asyncio.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +async def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + encryption_options=None, + **kwargs): + + if encryption_options: + encryptor, padder = get_blob_encryptor_and_padder( + encryption_options.get('cek'), + encryption_options.get('vector'), + uploader_class is not PageBlobChunkUploader) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + asyncio.ensure_future(uploader.process_chunk(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [] + for chunk in uploader.get_chunk_streams(): + range_ids.append(await uploader.process_chunk(chunk)) + + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +async def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: 
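+        # As in upload_data_chunks above, per-chunk conditional headers are only
+        # safe when chunks run sequentially, where each response can refresh the
+        # ETag before the next chunk is sent, hence: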
+ # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + asyncio.ensure_future(uploader.process_substream_block(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [] + for block in uploader.get_substream_blocks(): + range_ids.append(await uploader.process_substream_block(block)) + if any(range_ids): + return sorted(range_ids) + return + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = threading.Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b'' + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. 
+ if temp == b'' or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + async def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + async def _update_progress(self, length): + if self.progress_lock is not None: + async with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + async def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = await self._upload_chunk(chunk_offset, chunk_data) + await self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + async def process_substream_block(self, block_data): + return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + async def _upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_substream_block_with_progress(self, index, block_stream): + range_id = await self._upload_substream_block(index, block_stream) + await self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop('modified_access_conditions', None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. 
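+        # As in the synchronous uploader: the offset is zero-padded to 32 digits
+        # and double-base64-encoded so all block ids share one fixed length, as
+        # the service requires within a single blob.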
+ index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + await self.service.stage_block( + block_id, + len(chunk_data), + body=chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + return index, block_id + + async def _upload_substream_block(self, index, block_stream): + try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) + await self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + for each_byte in chunk_data: + if each_byte not in [0, b'\x00']: + return False + return True + + async def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = await self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + async def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + self.current_length = int(self.response_headers['blob_append_offset']) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + async def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + self.response_headers = await self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = 
self.response_headers['etag']
+
+    async def _upload_substream_block(self, index, block_stream):
+        try:
+            await self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
+
+    async def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = await self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end)
+        return range_id, response
+
+    # TODO: Implement this method.
+    async def _upload_substream_block(self, index, block_stream):
+        pass
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_06_12/_shared_access_signature.py
new file mode 100644
index 0000000..890ef1b
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_shared_access_signature.py
@@ -0,0 +1,596 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, TYPE_CHECKING
+)
+
+from ._shared import sign_string, url_quote
+from ._shared.constants import X_MS_VERSION
+from ._shared.models import Services
+from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \
+    QueryStringConstants
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from ..blob import (
+        ResourceTypes,
+        AccountSasPermissions,
+        UserDelegationKey,
+        ContainerSasPermissions,
+        BlobSasPermissions
+    )
+
+
+class BlobQueryStringConstants(object):
+    SIGNED_TIMESTAMP = 'snapshot'
+
+
+class BlobSharedAccessSignature(SharedAccessSignature):
+    '''
+    Provides a factory for creating blob and container access
+    signature tokens with a common account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key=None, user_delegation_key=None):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key:
+            Instead of an account key, the user could pass in a user delegation key.
+            A user delegation key can be obtained from the service by authenticating with an AAD identity;
+            this can be accomplished by calling get_user_delegation_key on any Blob service object.
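+
+            For example (a sketch; the account URL, credential and key window
+            are illustrative, not part of this module)::
+
+                from datetime import datetime, timedelta
+                service = BlobServiceClient(
+                    "https://myaccount.blob.core.windows.net", credential=aad_credential)
+                delegation_key = service.get_user_delegation_key(
+                    datetime.utcnow(), datetime.utcnow() + timedelta(hours=1))
+                sas = BlobSharedAccessSignature("myaccount", user_delegation_key=delegation_key)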
+ ''' + super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) + self.user_delegation_key = user_delegation_key + + def generate_blob(self, container_name, blob_name, snapshot=None, version_id=None, permission=None, + expiry=None, start=None, policy_id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None, **kwargs): + ''' + Generates a shared access signature for the blob or one of its snapshots. + Use the returned signature with the sas_token parameter of any BlobService. + + :param str container_name: + Name of container. + :param str blob_name: + Name of blob. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to grant permission. + :param BlobSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_blob_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. 
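+
+        Illustrative use (an editorial sketch; the account name, key and resource
+        names are placeholders, and ``BlobSasPermissions`` is exported by this
+        package)::
+
+            from datetime import datetime, timedelta
+
+            sas = BlobSharedAccessSignature("myaccount", account_key="<account-key>")
+            token = sas.generate_blob(
+                "mycontainer", "myblob",
+                permission=BlobSasPermissions(read=True),
+                expiry=datetime.utcnow() + timedelta(hours=1))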
+ ''' + resource_path = container_name + '/' + blob_name + + sas = _BlobSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + + resource = 'bs' if snapshot else 'b' + resource = 'bv' if version_id else resource + resource = 'd' if kwargs.pop("is_directory", None) else resource + sas.add_resource(resource) + + sas.add_timestamp(snapshot or version_id) + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_info_for_hns_account(**kwargs) + sas.add_resource_signature(self.account_name, self.account_key, resource_path, + user_delegation_key=self.user_delegation_key) + + return sas.get_token() + + def generate_container(self, container_name, permission=None, expiry=None, + start=None, policy_id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None, **kwargs): + ''' + Generates a shared access signature for the container. + Use the returned signature with the sas_token parameter of any BlobService. + + :param str container_name: + Name of container. + :param ContainerSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_blob_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. 
+ :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + sas = _BlobSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + sas.add_resource('c') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_info_for_hns_account(**kwargs) + sas.add_resource_signature(self.account_name, self.account_key, container_name, + user_delegation_key=self.user_delegation_key) + return sas.get_token() + + +class _BlobSharedAccessHelper(_SharedAccessHelper): + + def add_timestamp(self, timestamp): + self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) + + def add_info_for_hns_account(self, **kwargs): + self._add_query(QueryStringConstants.SIGNED_DIRECTORY_DEPTH, kwargs.pop('sdd', None)) + self._add_query(QueryStringConstants.SIGNED_AUTHORIZED_OID, kwargs.pop('preauthorized_agent_object_id', None)) + self._add_query(QueryStringConstants.SIGNED_UNAUTHORIZED_OID, kwargs.pop('agent_object_id', None)) + self._add_query(QueryStringConstants.SIGNED_CORRELATION_ID, kwargs.pop('correlation_id', None)) + + def get_value_to_append(self, query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): + # pylint: disable = no-member + if path[0] != '/': + path = '/' + path + + canonicalized_resource = '/blob/' + account_name + path + '\n' + + # Form the string to sign from shared_access_policy and canonicalized + # resource. The order of values is important. 
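+        # Editorial note: for the account-key path the assembled value is, in order,
+        # permission, start, expiry, the canonicalized resource above, the stored
+        # access policy identifier, ip, protocol, version, resource, snapshot
+        # timestamp, and the five response-header overrides, each newline-terminated.
+        # The user-delegation path replaces the policy identifier with the signed
+        # key fields added below.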
+ string_to_sign = \ + (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_START) + + self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + canonicalized_resource) + + if user_delegation_key is not None: + self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) + self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) + self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) + self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) + self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) + self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) + + string_to_sign += \ + (self.get_value_to_append(QueryStringConstants.SIGNED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_TID) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID)) + else: + string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + + string_to_sign += \ + (self.get_value_to_append(QueryStringConstants.SIGNED_IP) + + self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) + + self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) + + self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) + + # remove the trailing newline + if string_to_sign[-1] == '\n': + string_to_sign = string_to_sign[:-1] + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key if user_delegation_key is None else user_delegation_key.value, + string_to_sign)) + + def get_token(self): + # a conscious decision was made to exclude the timestamp in the generated token + # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp + exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP] + return '&'.join(['{0}={1}'.format(n, url_quote(v)) + for n, v in self.query_dict.items() if v is not None and n not in exclude]) + + +def generate_account_sas( + account_name, # type: str + account_key, # type: str + resource_types, # type: Union[ResourceTypes, str] + permission, # type: Union[AccountSasPermissions, str] + expiry, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): # type: (...) -> str + """Generates a shared access signature for the blob service. + + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. 
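+
+    A minimal illustrative call (the account name and key below are placeholders,
+    not values from this patch)::
+
+        from datetime import datetime, timedelta
+
+        token = generate_account_sas(
+            account_name="myaccount",
+            account_key="<account-key>",
+            resource_types=ResourceTypes(container=True, object=True),
+            permission=AccountSasPermissions(read=True, list=True),
+            expiry=datetime.utcnow() + timedelta(hours=1))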
+ + :param str account_name: + The storage account name used to generate the shared access signature. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param resource_types: + Specifies the resource types that are accessible with the account SAS. + :type resource_types: str or ~azure.storage.blob.ResourceTypes + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.AccountSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_sas_token] + :end-before: [END create_sas_token] + :language: python + :dedent: 8 + :caption: Generating a shared access signature. + """ + sas = SharedAccessSignature(account_name, account_key) + return sas.generate_account( + services=Services(blob=True), + resource_types=resource_types, + permission=permission, + expiry=expiry, + start=start, + ip=ip, + **kwargs + ) # type: ignore + + +def generate_container_sas( + account_name, # type: str + container_name, # type: str + account_key=None, # type: Optional[str] + user_delegation_key=None, # type: Optional[UserDelegationKey] + permission=None, # type: Optional[Union[ContainerSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Any + """Generates a shared access signature for a container. + + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str container_name: + The name of the container. 
+ :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + Either `account_key` or `user_delegation_key` must be specified. + :param ~azure.storage.blob.UserDelegationKey user_delegation_key: + Instead of an account shared key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. + When present, the SAS is signed with the user delegation key instead. + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.ContainerSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START generate_sas_token] + :end-before: [END generate_sas_token] + :language: python + :dedent: 12 + :caption: Generating a sas token. + """ + if not user_delegation_key and not account_key: + raise ValueError("Either user_delegation_key or account_key must be provided.") + + if user_delegation_key: + sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) + else: + sas = BlobSharedAccessSignature(account_name, account_key=account_key) + return sas.generate_container( + container_name, + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) + + +def generate_blob_sas( + account_name, # type: str + container_name, # type: str + blob_name, # type: str + snapshot=None, # type: Optional[str] + account_key=None, # type: Optional[str] + user_delegation_key=None, # type: Optional[UserDelegationKey] + permission=None, # type: Optional[Union[BlobSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Any + """Generates a shared access signature for a blob. + + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str container_name: + The name of the container. + :param str blob_name: + The name of the blob. + :param str snapshot: + An optional blob snapshot ID. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + Either `account_key` or `user_delegation_key` must be specified. + :param ~azure.storage.blob.UserDelegationKey user_delegation_key: + Instead of an account shared key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. + When present, the SAS is signed with the user delegation key instead. + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.BlobSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. 
+    :type start: ~datetime.datetime or str
+    :param str policy_id:
+        A unique value up to 64 characters in length that correlates to a
+        stored access policy. To create a stored access policy, use
+        :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`.
+    :param str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str version_id:
+        An optional blob version ID. This parameter only applies to accounts with
+        versioning enabled.
+
+        .. versionadded:: 12.4.0
+            This keyword argument was introduced in API version '2019-12-12'.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    if not user_delegation_key and not account_key:
+        raise ValueError("Either user_delegation_key or account_key must be provided.")
+    version_id = kwargs.pop('version_id', None)
+    if version_id and snapshot:
+        raise ValueError("snapshot and version_id cannot be set at the same time.")
+    if user_delegation_key:
+        sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key)
+    else:
+        sas = BlobSharedAccessSignature(account_name, account_key=account_key)
+    return sas.generate_blob(
+        container_name,
+        blob_name,
+        snapshot=snapshot,
+        version_id=version_id,
+        permission=permission,
+        expiry=expiry,
+        start=start,
+        policy_id=policy_id,
+        ip=ip,
+        **kwargs
+    )
diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_06_12/_upload_helpers.py
new file mode 100644
index 0000000..94313f6
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_06_12/_upload_helpers.py
@@ -0,0 +1,295 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from io import SEEK_SET, UnsupportedOperation +from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import + +import six +from azure.core.exceptions import ResourceExistsError, ResourceModifiedError, HttpResponseError + +from ._shared.response_handlers import ( + process_storage_error, + return_response_headers) +from ._shared.models import StorageErrorCode +from ._shared.uploads import ( + upload_data_chunks, + upload_substream_blocks, + BlockBlobChunkUploader, + PageBlobChunkUploader, + AppendBlobChunkUploader) +from ._shared.encryption import generate_blob_encryption_data, encrypt_blob +from ._generated.models import ( + BlockLookupList, + AppendPositionAccessConditions, + ModifiedAccessConditions, +) + +if TYPE_CHECKING: + from datetime import datetime # pylint: disable=unused-import + BlobLeaseClient = TypeVar("BlobLeaseClient") + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' + + +def _convert_mod_error(error): + message = error.message.replace( + "The condition specified using HTTP conditional header(s) is not met.", + "The specified blob already exists.") + message = message.replace("ConditionNotMet", "BlobAlreadyExists") + overwrite_error = ResourceExistsError( + message=message, + response=error.response, + error=error) + overwrite_error.error_code = StorageErrorCode.blob_already_exists + raise overwrite_error + + +def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument + return any([ + modified_access_conditions.if_modified_since, + modified_access_conditions.if_unmodified_since, + modified_access_conditions.if_none_match, + modified_access_conditions.if_match + ]) + + +def upload_block_blob( # pylint: disable=too-many-locals + client=None, + data=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + adjusted_count = length + if (encryption_options.get('key') is not None) and (adjusted_count is not None): + adjusted_count += (16 - (length % 16)) + blob_headers = kwargs.pop('blob_headers', None) + tier = kwargs.pop('standard_blob_tier', None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + # Do single put if the size is smaller than or equal config.max_single_put_size + if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): + try: + data = data.read(length) + if not isinstance(data, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + except AttributeError: + pass + if encryption_options.get('key'): + encryption_data, data = encrypt_blob(data, encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + return client.upload( + body=data, + content_length=adjusted_count, + blob_http_headers=blob_headers, + headers=headers, + cls=return_response_headers, + validate_content=validate_content, + data_stream_total=adjusted_count, + upload_stream_current=0, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + **kwargs) + + use_original_upload_path = blob_settings.use_byte_buffer or \ + validate_content or 
encryption_options.get('required') or \ + blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ + hasattr(stream, 'seekable') and not stream.seekable() or \ + not hasattr(stream, 'seek') or not hasattr(stream, 'tell') + + if use_original_upload_path: + if encryption_options.get('key'): + cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + encryption_options['cek'] = cek + encryption_options['vector'] = iv + block_ids = upload_data_chunks( + service=client, + uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + encryption_options=encryption_options, + headers=headers, + **kwargs + ) + else: + block_ids = upload_substream_blocks( + service=client, + uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + headers=headers, + **kwargs + ) + + block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) + block_lookup.latest = block_ids + return client.commit_block_list( + block_lookup, + blob_http_headers=blob_headers, + cls=return_response_headers, + validate_content=validate_content, + headers=headers, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + **kwargs) + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +def upload_page_blob( + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + if length is None or length < 0: + raise ValueError("A content length must be specified for a Page Blob.") + if length % 512 != 0: + raise ValueError("Invalid page blob size: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(length)) + if kwargs.get('premium_page_blob_tier'): + premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') + try: + headers['x-ms-access-tier'] = premium_page_blob_tier.value + except AttributeError: + headers['x-ms-access-tier'] = premium_page_blob_tier + if encryption_options and encryption_options.get('data'): + headers['x-ms-meta-encryptiondata'] = encryption_options['data'] + blob_tags_string = kwargs.pop('blob_tags_string', None) + + response = client.create( + content_length=0, + blob_content_length=length, + blob_sequence_number=None, + blob_http_headers=kwargs.pop('blob_headers', None), + blob_tags_string=blob_tags_string, + cls=return_response_headers, + headers=headers, + **kwargs) + if length == 0: + return response + + kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) + return upload_data_chunks( + service=client, + uploader_class=PageBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_page_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + encryption_options=encryption_options, + headers=headers, + **kwargs) + + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +def upload_append_blob( # pylint: disable=unused-argument + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if length == 0: + return {} + blob_headers = kwargs.pop('blob_headers', None) + append_conditions = AppendPositionAccessConditions( + max_size=kwargs.pop('maxsize_condition', None), + append_position=None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + try: + if overwrite: + client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + headers=headers, + **kwargs) + except HttpResponseError as error: + if error.response.status_code != 404: + raise + # rewind the request body if it is a stream + if hasattr(stream, 'read'): + try: + # attempt to rewind the body to the initial position + stream.seek(0, SEEK_SET) + except UnsupportedOperation: + # if body is not seekable, then retry would not work + raise error + client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/_version.py b/azure/multiapi/storagev2/blob/v2020_06_12/_version.py new file mode 100644 index 0000000..a30b2ba --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/_version.py @@ -0,0 +1,7 
@@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +VERSION = "12.8.1" diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/__init__.py new file mode 100644 index 0000000..33c1031 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/aio/__init__.py @@ -0,0 +1,141 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os + +from .._models import BlobType +from .._shared.policies_async import ExponentialRetry, LinearRetry +from ._blob_client_async import BlobClient +from ._container_client_async import ContainerClient +from ._blob_service_client_async import BlobServiceClient +from ._lease_async import BlobLeaseClient +from ._download_async import StorageStreamDownloader + + +async def upload_blob_to_url( + blob_url, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + credential=None, # type: Any + **kwargs): + # type: (...) -> dict[str, Any] + """Upload data to a given URL + + The data will be uploaded as a block blob. + + :param str blob_url: + The full URI to the blob. This can also include a SAS token. + :param data: + The data to upload. This can be bytes, text, an iterable or a file-like object. + :type data: bytes or str or Iterable + :param credential: + The credentials with which to authenticate. This is optional if the + blob URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword bool overwrite: + Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob_to_url will overwrite any existing data. If set to False, the + operation will fail with a ResourceExistsError. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword dict(str,str) metadata: + Name-value pairs associated with the blob as metadata. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
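+
+    Illustrative use (an editorial sketch; the URL, SAS token and payload are
+    placeholders)::
+
+        await upload_blob_to_url(
+            "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>",
+            data=b"hello, world",
+            overwrite=True)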
+ :keyword str encoding: + Encoding to use if text is supplied as input. Defaults to UTF-8. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict(str, Any) + """ + async with BlobClient.from_blob_url(blob_url, credential=credential) as client: + return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) + + +async def _download_to_stream(client, handle, **kwargs): + """Download data to specified open file-handle.""" + stream = await client.download_blob(**kwargs) + await stream.readinto(handle) + + +async def download_blob_from_url( + blob_url, # type: str + output, # type: str + credential=None, # type: Any + **kwargs): + # type: (...) -> None + """Download the contents of a blob to a local file or stream. + + :param str blob_url: + The full URI to the blob. This can also include a SAS token. + :param output: + Where the data should be downloaded to. This could be either a file path to write to, + or an open IO handle to write to. + :type output: str or writable stream + :param credential: + The credentials with which to authenticate. This is optional if the + blob URL already has a SAS token or the blob is public. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword bool overwrite: + Whether the local file should be overwritten if it already exists. The default value is + `False` - in which case a ValueError will be raised if the file already exists. If set to + `True`, an attempt will be made to write to the existing file. If a stream handle is passed + in, this value is ignored. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :keyword int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
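+
+    Illustrative use (an editorial sketch; the URL, SAS token and file name are
+    placeholders)::
+
+        await download_blob_from_url(
+            "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>",
+            "local_copy.bin",
+            overwrite=True)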
+ :rtype: None + """ + overwrite = kwargs.pop('overwrite', False) + async with BlobClient.from_blob_url(blob_url, credential=credential) as client: + if hasattr(output, 'write'): + await _download_to_stream(client, output, **kwargs) + else: + if not overwrite and os.path.isfile(output): + raise ValueError("The file '{}' already exists.".format(output)) + with open(output, 'wb') as file_handle: + await _download_to_stream(client, file_handle, **kwargs) + + +__all__ = [ + 'upload_blob_to_url', + 'download_blob_from_url', + 'BlobServiceClient', + 'ContainerClient', + 'BlobClient', + 'BlobLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'StorageStreamDownloader' +] diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_client_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_client_async.py new file mode 100644 index 0000000..a87a409 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_client_async.py @@ -0,0 +1,2483 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, invalid-overridden-method +from functools import partial +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, + TYPE_CHECKING +) + +from azure.core.pipeline import AsyncPipeline + +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.policies_async import ExponentialRetry +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._deserialize import get_page_ranges_result, parse_tags, deserialize_pipeline_response_into_cls +from .._serialize import get_modify_conditions, get_api_version, get_access_conditions +from .._generated.aio import AzureBlobStorage +from .._generated.models import CpkInfo +from .._deserialize import deserialize_blob_properties +from .._blob_client import BlobClient as BlobClientBase +from ._upload_helpers import ( + upload_block_blob, + upload_append_blob, + upload_page_blob) +from .._models import BlobType, BlobBlock, BlobProperties +from ._lease_async import BlobLeaseClient +from ._download_async import StorageStreamDownloader + + +if TYPE_CHECKING: + from datetime import datetime + from .._models import ( # pylint: disable=unused-import + ContentSettings, + PremiumPageBlobTier, + StandardBlobTier, + SequenceNumberAction + ) + + +class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disable=too-many-public-methods + """A client to interact with a specific blob, although that blob may not yet exist. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the blob, + use the :func:`from_blob_url` classmethod. + :param container_name: The container name for the blob. + :type container_name: str + :param blob_name: The name of the blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob_name: str + :param str snapshot: + The optional blob snapshot on which to operate. 
This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_client] + :end-before: [END create_blob_client] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a URL to a public blob (no auth needed). + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_client_sas_url] + :end-before: [END create_blob_client_sas_url] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a SAS URL to a blob. + """ + def __init__( + self, account_url, # type: str + container_name, # type: str + blob_name, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(BlobClient, self).__init__( + account_url, + container_name=container_name, + blob_name=blob_name, + snapshot=snapshot, + credential=credential, + **kwargs) + self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) + default_api_version = self._client._config.version # pylint: disable=protected-access + self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access + self._loop = kwargs.get('loop', None) + + @distributed_trace_async + async def get_account_information(self, **kwargs): # type: ignore + # type: (Optional[int]) -> Dict[str, str] + """Gets information related to the storage account in which the blob resides. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + """ + try: + return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_blob_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Dict[str, Any] + """ + Creates a new Block Blob where the content of the blob is read from a given URL. + The content of an existing blob is overwritten with the new blob. + + :param str source_url: + A URL of up to 2 KB in length that specifies a file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. + :keyword bool include_source_blob_properties: + Indicates if properties from the source blob should be copied. Defaults to True. + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + :paramtype tags: dict(str, str) + :keyword bytearray source_content_md5: + Specify the md5 that is used to verify the integrity of the source bytes. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. 
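+
+        Illustrative use (an editorial sketch; assumes an existing async
+        ``BlobClient`` named ``blob_client`` and a readable source URL)::
+
+            await blob_client.upload_blob_from_url(
+                "https://otheraccount.blob.core.windows.net/mycontainer/myblob?<sas>",
+                overwrite=True)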
+ """ + options = self._upload_blob_from_url_options( + source_url=self._encode_source_url(source_url), + **kwargs) + try: + return await self._client.block_blob.put_blob_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_blob( + self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] + length=None, # type: Optional[int] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Any + """Creates a new blob from a data source with automatic chunking. + + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. The exception to the above is with Append + blob types: if set to False and the data already exists, an error will not be raised + and the data will be appended to the existing blob. If set overwrite=True, then the existing + append blob will be deleted, and a new one created. Defaults to False. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + If specified, upload_blob only succeeds if the + blob's lease is active and matches this ID. + Required if the blob has an active lease. + :paramtype: ~azure.storage.blob.aio.BlobLeaseClient + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
+ :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world_async.py + :start-after: [START upload_a_blob] + :end-before: [END upload_a_blob] + :language: python + :dedent: 16 + :caption: Upload a blob to the container. 
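A minimal usage sketch for ``upload_blob`` (the ``aio`` import path is assumed to mirror the sync package shown earlier; the connection string and names are placeholders)::

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="hello.txt") as blob:
            # Block blob by default; overwrite=True replaces any existing data.
            await blob.upload_blob(b"Hello, world!", overwrite=True,
                                   metadata={"origin": "example"})

    asyncio.run(main())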
+ """ + options = self._upload_blob_options( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + **kwargs) + if blob_type == BlobType.BlockBlob: + return await upload_block_blob(**options) + if blob_type == BlobType.PageBlob: + return await upload_page_blob(**options) + return await upload_append_blob(**options) + + @distributed_trace_async + async def download_blob(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. + + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. 
versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :keyword str encoding:
+            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds. This method may make
+            multiple calls to the Azure service and the timeout will apply to
+            each call individually.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START download_a_blob]
+                :end-before: [END download_a_blob]
+                :language: python
+                :dedent: 16
+                :caption: Download a blob.
+        """
+        options = self._download_blob_options(
+            offset=offset,
+            length=length,
+            **kwargs)
+        downloader = StorageStreamDownloader(**options)
+        await downloader._setup()  # pylint: disable=protected-access
+        return downloader
+
+    @distributed_trace_async
+    async def delete_blob(self, delete_snapshots=None, **kwargs):
+        # type: (str, Any) -> None
+        """Marks the specified blob for deletion.
+
+        The blob is later deleted during garbage collection.
+        Note that in order to delete a blob, you must delete all of its
+        snapshots. You can delete both at the same time with the delete_blob()
+        operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blob
+        and retains the blob for a specified number of days.
+        After the specified number of days, the blob's data is removed from the service during garbage collection.
+        A soft-deleted blob is accessible through :func:`~ContainerClient.list_blobs()` by specifying the
+        `include=['deleted']` option, and can be restored using the :func:`undelete` operation.
+
+        :param str delete_snapshots:
+            Required if the blob has associated snapshots. Values include:
+             - "only": Deletes only the blob's snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to delete.
+
+            .. versionadded:: 12.4.0
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword lease:
+            Required if the blob has an active lease. If specified, delete_blob only
+            succeeds if the blob's lease is active and matches this ID. Value can be a
+            BlobLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
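A short sketch of the three read patterns on the downloader returned by ``download_blob`` (assumed ``aio`` import path; illustrative names)::

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="hello.txt") as blob:
            downloader = await blob.download_blob()
            data = await downloader.readall()            # entire payload in memory
            with open("hello_copy.txt", "wb") as handle:
                # A fresh downloader streams the blob into an open file handle.
                await (await blob.download_blob()).readinto(handle)
            async for chunk in (await blob.download_blob()).chunks():
                print(len(chunk))                        # iterate the content chunk by chunk

    asyncio.run(main())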
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START delete_blob]
+                :end-before: [END delete_blob]
+                :language: python
+                :dedent: 16
+                :caption: Delete a blob.
+        """
+        options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs)
+        try:
+            await self._client.blob.delete(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def undelete_blob(self, **kwargs):
+        # type: (Any) -> None
+        """Restores soft-deleted blobs or snapshots.
+
+        The operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START undelete_blob]
+                :end-before: [END undelete_blob]
+                :language: python
+                :dedent: 12
+                :caption: Undeleting a blob.
+        """
+        try:
+            await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a blob exists with the defined parameters, and returns
+        False otherwise.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to check if it exists.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: True if the blob exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            await self._client.blob.get_properties(
+                snapshot=self.snapshot,
+                **kwargs)
+            return True
+        # Encrypted with CPK
+        except ResourceExistsError:
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace_async
+    async def get_blob_properties(self, **kwargs):
+        # type: (Any) -> BlobProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the blob. It does not return the content of the blob.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to get properties for.
+
+            .. versionadded:: 12.4.0
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
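The delete/undelete pair above can be exercised like this (soft delete must be enabled on the account for ``undelete_blob`` to succeed; connection string and names are placeholders)::

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="hello.txt") as blob:
            await blob.delete_blob(delete_snapshots="include")  # blob plus all snapshots
            print(await blob.exists())                          # False while soft-deleted
            await blob.undelete_blob()                          # only within the retention window
            print(await blob.exists())                          # True again

    asyncio.run(main())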
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: BlobProperties + :rtype: ~azure.storage.blob.BlobProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] + :language: python + :dedent: 12 + :caption: Getting the properties for a blob. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + try: + cls_method = kwargs.pop('cls', None) + if cls_method: + kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) + blob_props = await self._client.blob.get_properties( + timeout=kwargs.pop('timeout', None), + version_id=kwargs.pop('version_id', None), + snapshot=self.snapshot, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=kwargs.pop('cls', None) or deserialize_blob_properties, + cpk_info=cpk_info, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + blob_props.name = self.blob_name + if isinstance(blob_props, BlobProperties): + blob_props.container = self.container_name + blob_props.snapshot = self.snapshot + return blob_props # type: ignore + + @distributed_trace_async + async def set_http_headers(self, content_settings=None, **kwargs): + # type: (Optional[ContentSettings], Any) -> None + """Sets system properties on the blob. + + If one property is set for the content_settings, all properties will be overridden. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword lease: + Required if the blob has an active lease. 
Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: Dict[str, Any] + """ + options = self._set_http_headers_options(content_settings=content_settings, **kwargs) + try: + return await self._client.blob.set_http_headers(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_blob_metadata(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Sets user-defined metadata for the blob as one or more name-value pairs. + + :param metadata: + Dict containing name and value pairs. Each call to this operation + replaces all existing metadata attached to the blob. To remove all + metadata from the blob, call this operation with no metadata headers. + :type metadata: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. 
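The property-related calls documented here compose naturally (assumed ``aio`` import path; ``ContentSettings`` is exported by this package as shown earlier)::

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12 import ContentSettings
    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="hello.txt") as blob:
            await blob.set_http_headers(content_settings=ContentSettings(
                content_type="text/plain", cache_control="max-age=3600"))
            await blob.set_blob_metadata({"department": "finance"})  # replaces all metadata
            props = await blob.get_blob_properties()
            print(props.name, props.size, props.etag, props.content_settings.content_type)

    asyncio.run(main())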
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag and last modified)
+        """
+        options = self._set_blob_metadata_options(metadata=metadata, **kwargs)
+        try:
+            return await self._client.blob.set_metadata(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_page_blob(  # type: ignore
+            self, size,  # type: int
+            content_settings=None,  # type: Optional[ContentSettings]
+            metadata=None,  # type: Optional[Dict[str, str]]
+            premium_page_blob_tier=None,  # type: Optional[Union[str, PremiumPageBlobTier]]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Creates a new Page Blob of the specified size.
+
+        :param int size:
+            This specifies the maximum size for the page blob, up to 1 TB.
+            The page blob size must be aligned to a 512-byte boundary.
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword int sequence_number:
+            Only for Page blobs. The sequence number is a user-controlled value that you can use to
+            track requests. The value of the sequence number must be between 0
+            and 2^63 - 1. The default value is 0.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value.
Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_page_blob_options( + size, + content_settings=content_settings, + metadata=metadata, + premium_page_blob_tier=premium_page_blob_tier, + **kwargs) + try: + return await self._client.page_blob.create(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def create_append_blob(self, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Creates a new Append Blob. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. 
Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_append_blob_options( + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return await self._client.append_blob.create(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def create_snapshot(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Creates a snapshot of the blob. + + A snapshot is a read-only version of a blob that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a blob as it appears at a moment in time. + + A snapshot of a blob has the same name as the base blob from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
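A sketch creating one blob of each type documented above (page blob sizes must be 512-byte aligned; connection string and names are placeholders, assumed ``aio`` import path)::

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        conn = "<connection-string>"
        async with BlobClient.from_connection_string(conn, "mycontainer", "disk.vhd") as page_blob:
            await page_blob.create_page_blob(size=4 * 512)   # must be a multiple of 512 bytes
        async with BlobClient.from_connection_string(conn, "mycontainer", "events.log") as append_blob:
            await append_blob.create_append_blob()

    asyncio.run(main())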
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START create_blob_snapshot] + :end-before: [END create_blob_snapshot] + :language: python + :dedent: 12 + :caption: Create a snapshot of the blob. + """ + options = self._create_snapshot_options(metadata=metadata, **kwargs) + try: + return await self._client.blob.create_snapshot(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): + # type: (str, Optional[Dict[str, str]], bool, Any) -> Any + """Copies a blob asynchronously. + + This operation returns a copy operation + object that can be used to wait on the completion of the operation, + as well as check status or abort the copy operation. + The Blob service copies blobs on a best-effort basis. + + The source blob for a copy operation may be a block blob, an append blob, + or a page blob. If the destination blob already exists, it must be of the + same blob type as the source blob. Any existing destination blob will be + overwritten. The destination blob cannot be modified while a copy operation + is in progress. + + When copying from a page blob, the Blob service creates a destination page + blob of the source blob's length, initially containing all zeroes. Then + the source page ranges are enumerated, and non-empty ranges are copied. + + For a block blob or an append blob, the Blob service creates a committed + blob of zero length before returning from this operation. When copying + from a block blob, all committed blocks and their block IDs are copied. + Uncommitted blocks are not copied. 
At the end of the copy operation, the
+        destination blob will have the same committed block count as the source.
+
+        When copying from an append blob, all committed blocks are copied. At the
+        end of the copy operation, the destination blob will have the same committed
+        block count as the source.
+
+        For all blob types, you can call status() on the returned polling object
+        to check the status of the copy operation, or wait() to block until the
+        operation is complete. The final blob will be committed when the copy completes.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies a file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
+
+            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+        :param metadata:
+            Name-value pairs associated with the blob as metadata. If no name-value
+            pairs are specified, the operation will copy the metadata from the
+            source blob or file to the destination blob. If one or more name-value
+            pairs are specified, the destination blob is created with the specified
+            metadata, and metadata is not copied from the source blob or file.
+        :type metadata: dict(str, str)
+        :param bool incremental_copy:
+            Copies the snapshot of the source page blob to a destination page blob.
+            The snapshot is copied such that only the differential changes between
+            the previously copied snapshot and the source are transferred to the destination.
+            The copied snapshots are complete copies of the original snapshot and
+            can be read or copied from as usual. Defaults to False.
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source
+            blob has been modified since the specified date/time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source blob
+            has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+ :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has been modified since the specified date/time. + If the destination blob has not been modified, the Blob service returns + status code 412 (Precondition Failed). + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has not been modified since the specified + date/time. If the destination blob has been modified, the Blob service + returns status code 412 (Precondition Failed). + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword source_lease: + Specify this to perform the Copy Blob operation only if + the lease ID given matches the active lease ID of the source blob. + :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword bool seal_destination_blob: + Seal the destination append blob. This operation is only for append blob. + + .. versionadded:: 12.4.0 + + :keyword bool requires_sync: + Enforces that the service will not return a response until the copy is complete. + :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START copy_blob_from_url] + :end-before: [END copy_blob_from_url] + :language: python + :dedent: 16 + :caption: Copy a blob from a URL. 
+ """ + options = self._start_copy_from_url_options( + source_url=self._encode_source_url(source_url), + metadata=metadata, + incremental_copy=incremental_copy, + **kwargs) + try: + if incremental_copy: + return await self._client.page_blob.copy_incremental(**options) + return await self._client.blob.start_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination blob with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID, or an + instance of BlobProperties. + :type copy_id: str or ~azure.storage.blob.BlobProperties + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START abort_copy_blob_from_url] + :end-before: [END abort_copy_blob_from_url] + :language: python + :dedent: 16 + :caption: Abort copying a blob from URL. + """ + options = self._abort_copy_options(copy_id, **kwargs) + try: + await self._client.blob.abort_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): + # type: (int, Optional[str], Any) -> BlobLeaseClient + """Requests a new lease. + + If the blob does not have an active lease, the Blob + Service creates a lease on the blob and returns a new lease. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A BlobLeaseClient object. 
+ :rtype: ~azure.storage.blob.aio.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START acquire_lease_on_blob] + :end-before: [END acquire_lease_on_blob] + :language: python + :dedent: 12 + :caption: Acquiring a lease on a blob. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + await lease.acquire(lease_duration=lease_duration, **kwargs) + return lease + + @distributed_trace_async + async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): + # type: (Union[str, StandardBlobTier], Any) -> None + """This operation sets the tier on a block blob. + + A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param standard_blob_tier: + Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', + 'Archive'. The hot tier is optimized for storing data that is accessed + frequently. The cool storage tier is optimized for storing data that + is infrequently accessed and stored for at least a month. The archive + tier is optimized for storing data that is rarely accessed and stored + for at least six months with flexible latency requirements. + :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if standard_blob_tier is None: + raise ValueError("A StandardBlobTier must be specified") + try: + await self._client.blob.set_tier( + tier=standard_blob_tier, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def stage_block( + self, block_id, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> None + """Creates a new block to be committed as part of a blob. + + :param str block_id: A string value that identifies the block. + The string should be less than or equal to 64 bytes in size. + For a given blob, the block_id must be the same size for each block. + :param data: The blob data. + :param int length: Size of the block. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. 
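A lease-guarded tier change, sketched under the same assumptions (placeholder names)::

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="hello.txt") as blob:
            lease = await blob.acquire_lease(lease_duration=15)   # finite leases: 15-60 seconds
            try:
                await blob.set_standard_blob_tier("Cool", lease=lease)
            finally:
                await lease.release()

    asyncio.run(main())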
Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword str encoding: + Defaults to UTF-8. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + options = self._stage_block_options( + block_id, + data, + length=length, + **kwargs) + try: + return await self._client.block_blob.stage_block(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def stage_block_from_url( + self, block_id, # type: str + source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + source_content_md5=None, # type: Optional[Union[bytes, bytearray]] + **kwargs + ): + # type: (...) -> None + """Creates a new block to be committed as part of a blob where + the contents are read from a URL. + + :param str block_id: A string value that identifies the block. + The string should be less than or equal to 64 bytes in size. + For a given blob, the block_id must be the same size for each block. + :param str source_url: The URL. + :param int source_offset: + Start of byte range to use for the block. + Must be set if source length is provided. + :param int source_length: The size of the block in bytes. + :param bytearray source_content_md5: + Specify the md5 calculated for the range of + bytes that must be read from the copy source. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+        :rtype: None
+        """
+        options = self._stage_block_from_url_options(
+            block_id,
+            source_url=self._encode_source_url(source_url),
+            source_offset=source_offset,
+            source_length=source_length,
+            source_content_md5=source_content_md5,
+            **kwargs)
+        try:
+            return await self._client.block_blob.stage_block_from_url(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_block_list(self, block_list_type="committed", **kwargs):
+        # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]]
+        """The Get Block List operation retrieves the list of blocks that have
+        been uploaded as part of a block blob.
+
+        :param str block_list_type:
+            Specifies whether to return the list of committed
+            blocks, the list of uncommitted blocks, or both lists together.
+            Possible values include: 'committed', 'uncommitted', 'all'
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A tuple of two lists - committed and uncommitted blocks
+        :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock))
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            blocks = await self._client.block_blob.get_block_list(
+                list_type=block_list_type,
+                snapshot=self.snapshot,
+                timeout=kwargs.pop('timeout', None),
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return self._get_block_list_result(blocks)
+
+    @distributed_trace_async
+    async def commit_block_list(  # type: ignore
+            self, block_list,  # type: List[BlobBlock]
+            content_settings=None,  # type: Optional[ContentSettings]
+            metadata=None,  # type: Optional[Dict[str, str]]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """The Commit Block List operation writes a blob by specifying the list of
+        block IDs that make up the blob.
+
+        :param list block_list:
+            List of BlobBlock objects specifying the blocks to commit.
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict[str, str]
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._commit_block_list_options( + block_list, + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return await self._client.block_blob.commit_block_list(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): + # type: (Union[str, PremiumPageBlobTier], **Any) -> None + """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. + + :param premium_page_blob_tier: + A page blob tier value to set the blob to. 
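The stage/commit workflow for block blobs, end to end (a sketch; block IDs just need a consistent length per blob, and uuid hex is one convenient choice)::

    import asyncio
    import uuid

    from azure.multiapi.storagev2.blob.v2020_06_12 import BlobBlock
    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        async with BlobClient.from_connection_string(
                "<connection-string>", container_name="mycontainer", blob_name="big.bin") as blob:
            block_ids = []
            for chunk in (b"part-1", b"part-2"):
                block_id = uuid.uuid4().hex          # <= 64 bytes, same length for every block
                await blob.stage_block(block_id, chunk)
                block_ids.append(block_id)
            committed, uncommitted = await blob.get_block_list("all")
            print(len(committed), len(uncommitted))  # 0 committed, 2 uncommitted so far
            await blob.commit_block_list([BlobBlock(block_id=bid) for bid in block_ids])

    asyncio.run(main())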
The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :rtype: None
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if premium_page_blob_tier is None:
+ raise ValueError("A PremiumPageBlobTier must be specified")
+ try:
+ await self._client.blob.set_tier(
+ tier=premium_page_blob_tier,
+ timeout=kwargs.pop('timeout', None),
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def set_blob_tags(self, tags=None, **kwargs):
+ # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ """The Set Tags operation enables users to set tags on a blob or specific blob version, but not on a snapshot.
+ Each call to this operation replaces all existing tags attached to the blob. To remove all
+ tags from the blob, call this operation with no tags set.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :param tags:
+ Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+ :type tags: dict(str, str)
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to set tags on.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the tags content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
+ """
+ options = self._set_blob_tags_options(tags=tags, **kwargs)
+ try:
+ return await self._client.blob.set_tags(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_blob_tags(self, **kwargs):
+ # type: (**Any) -> Dict[str, str]
+ """The Get Tags operation enables users to get tags on a blob or specific blob version, but not on a snapshot.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to get tags from.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Key value pairs of blob tags.
+ :rtype: Dict[str, str]
+ """
+ options = self._get_blob_tags_options(**kwargs)
+ try:
+ _, tags = await self._client.blob.get_tags(**options)
+ return parse_tags(tags) # pylint: disable=protected-access
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_page_ranges( # type: ignore
+ self, offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a Page Blob or snapshot
+ of a page blob.
+
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param str previous_snapshot_diff:
+ The snapshot diff parameter that contains an opaque DateTime value that
+ specifies a previous blob snapshot to be compared
+ against a more recent snapshot or the current blob.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
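`set_blob_tags` and `get_blob_tags` are a straight round trip, with each set call replacing the entire tag set. A short sketch under the same assumptions (vendored aio path, placeholder names):

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="mycontainer", blob_name="myblob")
        async with blob:
            await blob.set_blob_tags({"project": "demo", "stage": "raw"})
            print(await blob.get_blob_tags())  # {'project': 'demo', 'stage': 'raw'}
            await blob.set_blob_tags()         # calling with no tags clears the set

    asyncio.run(main())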
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges, the second element is the list of cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ previous_snapshot_diff=previous_snapshot_diff,
+ **kwargs)
+ try:
+ if previous_snapshot_diff:
+ ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+ else:
+ ranges = await self._client.page_blob.get_page_ranges(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
+
+ @distributed_trace_async
+ async def get_page_range_diff_for_managed_disk(
+ self, previous_snapshot_url, # type: str
+ offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a managed disk or snapshot.
+
+ .. note::
+ This operation is only available for managed disk accounts.
+
+ .. versionadded:: 12.2.0
+ This operation was introduced in API version '2019-07-07'.
+
+ :param str previous_snapshot_url:
+ Specifies the URL of a previous snapshot of the managed disk.
+ The response will only contain pages that were changed between the target blob and
+ its previous snapshot.
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
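For `get_page_ranges`, note that the two lists describe 512-byte-aligned ranges and that cleared ranges are only reported when diffing against a snapshot. A sketch (vendored aio path, placeholder names):

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="mycontainer", blob_name="mypages")
        async with blob:
            await blob.create_page_blob(1024)  # two 512-byte pages
            await blob.upload_page(b"\x01" * 512, offset=0, length=512)
            filled, cleared = await blob.get_page_ranges()
            print(filled)   # e.g. [{'start': 0, 'end': 511}]
            print(cleared)  # [] unless previous_snapshot_diff was passed

    asyncio.run(main())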
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges, the second element is the list of cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ prev_snapshot_url=previous_snapshot_url,
+ **kwargs)
+ try:
+ ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
+
+ @distributed_trace_async
+ async def set_sequence_number( # type: ignore
+ self, sequence_number_action, # type: Union[str, SequenceNumberAction]
+ sequence_number=None, # type: Optional[str]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets the blob sequence number.
+
+ :param str sequence_number_action:
+ This property indicates how the service should modify the blob's sequence
+ number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
+ :param str sequence_number:
+ This property sets the blob's sequence number. The sequence number is a
+ user-controlled property that you can use to track requests and manage
+ concurrency issues.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any) + """ + options = self._set_sequence_number_options( + sequence_number_action, sequence_number=sequence_number, **kwargs) + try: + return await self._client.page_blob.update_sequence_number(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def resize_blob(self, size, **kwargs): + # type: (int, Any) -> Dict[str, Union[str, datetime]] + """Resizes a page blob to the specified size. + + If the specified value is less than the current size of the blob, + then all pages above the specified value are cleared. + + :param int size: + Size used to resize blob. Maximum size for a page blob is up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._resize_blob_options(size, **kwargs) + try: + return await self._client.page_blob.resize(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_page( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """The Upload Pages operation writes a range of pages to a page blob. + + :param bytes page: + Content of the page. + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. 
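Sequence numbers pair with the `if_sequence_number_*` conditions on page writes to give optimistic concurrency control, and `resize_blob` truncates by clearing everything above the new size. A sketch of both (vendored aio path, placeholder names):

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12 import SequenceNumberAction
    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="mycontainer", blob_name="mypages")
        async with blob:
            await blob.create_page_blob(2048)
            await blob.set_sequence_number(SequenceNumberAction.Update, "7")
            # The write only proceeds while the sequence number is still 7.
            await blob.upload_page(b"\x00" * 512, offset=0, length=512,
                                   if_sequence_number_eq=7)
            await blob.resize_blob(1024)  # pages beyond 1024 bytes are cleared

    asyncio.run(main())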
+ Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). 
+ :rtype: dict(str, Any)
+ """
+ options = self._upload_page_options(
+ page=page,
+ offset=offset,
+ length=length,
+ **kwargs)
+ try:
+ return await self._client.page_blob.upload_pages(**options) # type: ignore
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def upload_pages_from_url(self, source_url, # type: str
+ offset, # type: int
+ length, # type: int
+ source_offset, # type: int
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ """
+ The Upload Pages operation writes a range of pages to a page blob where
+ the contents are read from a URL.
+
+ :param str source_url:
+ The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+ shared access signature attached.
+ :param int offset:
+ Start of byte range to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int source_offset:
+ This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+ The service will read the same number of bytes as the destination range (length-offset).
+ :keyword bytes source_content_md5:
+ If given, the service will calculate the MD5 hash of the block content and compare against this value.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the source resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the source resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword int if_sequence_number_lte:
+ If the blob's sequence number is less than or equal to
+ the specified value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_lt:
+ If the blob's sequence number is less than the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_eq:
+ If the blob's sequence number is equal to the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + """ + + options = self._upload_pages_from_url_options( + source_url=self._encode_source_url(source_url), + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def clear_page(self, offset, length, **kwargs): + # type: (int, int, Any) -> Dict[str, Union[str, datetime]] + """Clears a range of pages. + + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. 
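`upload_pages_from_url` is a server-side copy: the destination range (`offset`/`length`) and the source range starting at `source_offset` must be the same size, and no data flows through the client. A sketch with hypothetical URLs (the source must be public or carry a SAS):

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        src = "https://srcaccount.blob.core.windows.net/src/pages.bin?<sas>"  # placeholder
        dst = BlobClient.from_connection_string(
            "<connection-string>", container_name="dst", blob_name="pages.bin")
        async with dst:
            await dst.create_page_blob(4096)
            # Copy the first 4096 bytes of the source into the same range here.
            await dst.upload_pages_from_url(src, offset=0, length=4096, source_offset=0)

    asyncio.run(main())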
+ :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._clear_page_options(offset, length, **kwargs) + try: + return await self._client.page_blob.clear_pages(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def append_block( # type: ignore + self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """Commits a new block of data to the end of the existing append blob. + + :param data: + Content of the block. + :param int length: + Size of the block in bytes. + :keyword bool validate_content: + If true, calculates an MD5 hash of the block content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. 
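`clear_page` zeroes a 512-byte-aligned range, after which that range no longer shows up as filled. A sketch assuming an existing page blob (placeholder names as before):

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="mycontainer", blob_name="mypages")
        async with blob:
            await blob.clear_page(offset=512, length=512)  # drop the second page
            filled, _ = await blob.get_page_ranges()
            print(filled)                                  # second page is gone

    asyncio.run(main())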
If it
+ is not, the request will fail with the AppendPositionConditionNotMet error
+ (HTTP status code 412 - Precondition Failed).
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+ :rtype: dict(str, Any)
+ """
+ options = self._append_block_options(
+ data,
+ length=length,
+ **kwargs
+ )
+ try:
+ return await self._client.append_blob.append_block(**options) # type: ignore
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async()
+ async def append_block_from_url(self, copy_source_url, # type: str
+ source_offset=None, # type: Optional[int]
+ source_length=None, # type: Optional[int]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime, int]]
+ """
+ Creates a new block to be committed as part of a blob, where the contents are read from a source url.
+
+ :param str copy_source_url:
+ The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+ shared access signature attached.
+ :param int source_offset:
+ This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
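`append_block` only ever adds to the end of an append blob, and `maxsize_condition`/`appendpos_condition` turn that into a bounded, race-safe log write. A sketch (vendored aio path, placeholder names):

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="logs", blob_name="app.log")
        async with blob:
            await blob.create_append_blob()
            # Fails with a 412 once the blob would grow past 1 MiB.
            resp = await blob.append_block(b"one line of log output\n",
                                           maxsize_condition=1024 * 1024)
            print(resp)  # Etag, append offset, committed block count, ...

    asyncio.run(main())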
+ :param int source_length: + This indicates the end of the range of bytes that has to be taken from the copy source. + :keyword bytearray source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the + AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. 
+ :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + """ + options = self._append_block_from_url_options( + copy_source_url=self._encode_source_url(copy_source_url), + source_offset=source_offset, + source_length=source_length, + **kwargs + ) + try: + return await self._client.append_blob.append_block_from_url(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async() + async def seal_append_blob(self, **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """The Seal operation seals the Append Blob to make it read-only. + + .. versionadded:: 12.4.0 + + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). 
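`append_block_from_url` plus `seal_append_blob` supports a build-then-freeze pattern: append server-side from source blobs, then seal so no further appends are accepted. A sketch with a hypothetical SAS-authenticated source:

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient

    async def main():
        src = "https://srcaccount.blob.core.windows.net/src/chunk.bin?<sas>"  # placeholder
        dst = BlobClient.from_connection_string(
            "<connection-string>", container_name="dst", blob_name="merged.bin")
        async with dst:
            await dst.create_append_blob()
            # Append a 1 KiB range from the source without downloading it locally.
            await dst.append_block_from_url(src, source_offset=0, source_length=1024)
            await dst.seal_append_blob()  # further appends are rejected

    asyncio.run(main())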
+ :rtype: dict(str, Any) + """ + options = self._seal_append_blob_options(**kwargs) + try: + return await self._client.append_blob.seal(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _get_container_client(self): # pylint: disable=client-method-missing-kwargs + # type: (...) -> ContainerClient + """Get a client to interact with the blob's parent container. + + The container need not already exist. Defaults to current blob's credentials. + + :returns: A ContainerClient. + :rtype: ~azure.storage.blob.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_container_client_from_blob_client] + :end-before: [END get_container_client_from_blob_client] + :language: python + :dedent: 12 + :caption: Get container client from blob object. + """ + from ._container_client_async import ContainerClient + if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + else: + _pipeline = self._pipeline # pylint: disable = protected-access + return ContainerClient( + "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name, + credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, + _pipeline=_pipeline, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_service_client_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_service_client_async.py new file mode 100644 index 0000000..d3d72ba --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_blob_service_client_async.py @@ -0,0 +1,678 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+import functools
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List,
+ TYPE_CHECKING
+)
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.async_paging import AsyncItemPaged
+
+from .._shared.models import LocationMode
+from .._shared.policies_async import ExponentialRetry
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
+from .._shared.response_handlers import return_response_headers, process_storage_error
+from .._shared.parser import _to_utc_datetime
+from .._shared.response_handlers import parse_to_internal_user_delegation_key
+from .._generated.aio import AzureBlobStorage
+from .._generated.models import StorageServiceProperties, KeyInfo
+from .._blob_service_client import BlobServiceClient as BlobServiceClientBase
+from ._container_client_async import ContainerClient
+from ._blob_client_async import BlobClient
+from .._models import ContainerProperties
+from .._deserialize import service_stats_deserialize, service_properties_deserialize
+from .._serialize import get_api_version
+from ._models import ContainerPropertiesPaged, FilteredBlobPaged
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey
+ from ._lease_async import BlobLeaseClient
+ from .._models import (
+ BlobProperties,
+ PublicAccess,
+ BlobAnalyticsLogging,
+ Metrics,
+ CorsRule,
+ RetentionPolicy,
+ StaticWebsite,
+ )
+
+
+class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase):
+ """A client to interact with the Blob Service at the account level.
+
+ This client provides operations to retrieve and configure the account properties
+ as well as list, create and delete containers within the account.
+ For operations relating to a specific container or blob, clients for those entities
+ can also be retrieved using the `get_client` functions.
+
+ :param str account_url:
+ The URL to the blob storage account. Any other entities included
+ in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
+ authenticated with a SAS token.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+ an instance of a AzureSasCredential from azure.core.credentials, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+ :keyword str api_version:
+ The Storage API version to use for requests. Default value is '2020-06-12'.
+ Setting to an older version may result in reduced feature compatibility.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str secondary_hostname:
+ The hostname of the secondary endpoint.
+ :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+ Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_service_client] + :end-before: [END create_blob_service_client] + :language: python + :dedent: 8 + :caption: Creating the BlobServiceClient with account url and credential. + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_service_client_oauth] + :end-before: [END create_blob_service_client_oauth] + :language: python + :dedent: 8 + :caption: Creating the BlobServiceClient with Azure Identity credentials. + """ + + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(BlobServiceClient, self).__init__( + account_url, + credential=credential, + **kwargs) + self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) + default_api_version = self._client._config.version # pylint: disable=protected-access + self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access + self._loop = kwargs.get('loop', None) + + @distributed_trace_async + async def get_user_delegation_key(self, key_start_time, # type: datetime + key_expiry_time, # type: datetime + **kwargs # type: Any + ): + # type: (...) -> UserDelegationKey + """ + Obtain a user delegation key for the purpose of signing SAS tokens. + A token credential must be present on the service object for this request to succeed. + + :param ~datetime.datetime key_start_time: + A DateTime value. Indicates when the key becomes valid. + :param ~datetime.datetime key_expiry_time: + A DateTime value. Indicates when the key stops being valid. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The user delegation key. 
+ :rtype: ~azure.storage.blob.UserDelegationKey
+ """
+ key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time))
+ timeout = kwargs.pop('timeout', None)
+ try:
+ user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info,
+ timeout=timeout,
+ **kwargs) # type: ignore
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore
+
+ @distributed_trace_async
+ async def get_account_information(self, **kwargs):
+ # type: (Any) -> Dict[str, str]
+ """Gets information related to the storage account.
+
+ The information can also be retrieved if the user has a SAS to a container or blob.
+ The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+ :returns: A dict of account information (SKU and account type).
+ :rtype: dict(str, str)
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service_async.py
+ :start-after: [START get_blob_service_account_info]
+ :end-before: [END get_blob_service_account_info]
+ :language: python
+ :dedent: 12
+ :caption: Getting account information for the blob service.
+ """
+ try:
+ return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_service_stats(self, **kwargs):
+ # type: (Any) -> Dict[str, Any]
+ """Retrieves statistics related to replication for the Blob service.
+
+ It is only available when read-access geo-redundant replication is enabled for
+ the storage account.
+
+ With geo-redundant replication, Azure Storage maintains your data durably
+ in two locations. In both locations, Azure Storage constantly maintains
+ multiple healthy replicas of your data. The location where you read,
+ create, update, or delete data is the primary storage account location.
+ The primary location exists in the region you choose at the time you
+ create an account via the Azure portal, for
+ example, North Central US. The location to which your data is replicated
+ is the secondary location. The secondary location is automatically
+ determined based on the location of the primary; it is in a second data
+ center that resides in the same region as the primary location. Read-only
+ access is available from the secondary location, if read-access geo-redundant
+ replication is enabled for your storage account.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: The blob service stats.
+ :rtype: Dict[str, Any]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_service_async.py
+ :start-after: [START get_blob_service_stats]
+ :end-before: [END get_blob_service_stats]
+ :language: python
+ :dedent: 12
+ :caption: Getting service stats for the blob service.
+ """
+ timeout = kwargs.pop('timeout', None)
+ try:
+ stats = await self._client.service.get_statistics( # type: ignore
+ timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
+ return service_stats_deserialize(stats)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_service_properties(self, **kwargs):
+ # type: (Any) -> Dict[str, Any]
+ """Gets the properties of a storage account's Blob service, including
+ Azure Storage Analytics.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
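A user delegation key is the OAuth-based alternative to the account key for signing SAS tokens: it is requested over a token credential and then fed to the module-level SAS helpers. A sketch that assumes azure-identity is installed and uses placeholder account, container and blob names:

    import asyncio
    from datetime import datetime, timedelta

    from azure.identity.aio import DefaultAzureCredential
    from azure.multiapi.storagev2.blob.v2020_06_12 import (
        BlobSasPermissions, generate_blob_sas)
    from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobServiceClient

    async def main():
        service = BlobServiceClient("https://myaccount.blob.core.windows.net",
                                    credential=DefaultAzureCredential())
        async with service:
            now = datetime.utcnow()
            key = await service.get_user_delegation_key(now, now + timedelta(hours=1))
        # Sign a read-only, one-hour blob SAS with the delegation key.
        sas = generate_blob_sas("myaccount", "mycontainer", "myblob",
                                user_delegation_key=key,
                                permission=BlobSasPermissions(read=True),
                                expiry=now + timedelta(hours=1))
        print(sas)

    asyncio.run(main())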
+ :returns: An object containing blob service properties such as + analytics logging, hour/minute metrics, cors rules, etc. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START get_blob_service_properties] + :end-before: [END get_blob_service_properties] + :language: python + :dedent: 12 + :caption: Getting service properties for the blob service. + """ + timeout = kwargs.pop('timeout', None) + try: + service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_service_properties( + self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] + hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + target_version=None, # type: Optional[str] + delete_retention_policy=None, # type: Optional[RetentionPolicy] + static_website=None, # type: Optional[StaticWebsite] + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's Blob service, including + Azure Storage Analytics. + + If an element (e.g. analytics_logging) is left as None, the + existing settings on the service for that functionality are preserved. + + :param analytics_logging: + Groups the Azure Analytics Logging settings. + :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for blobs. + :type hour_metrics: ~azure.storage.blob.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for blobs. + :type minute_metrics: ~azure.storage.blob.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list[~azure.storage.blob.CorsRule] + :param str target_version: + Indicates the default version to use for requests if an incoming + request's version is not specified. + :param delete_retention_policy: + The delete retention policy specifies whether to retain deleted blobs. + It also specifies the number of days and versions of blob to keep. + :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy + :param static_website: + Specifies whether the static website feature is enabled, + and if yes, indicates the index document and 404 error document to use. + :type static_website: ~azure.storage.blob.StaticWebsite + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START set_blob_service_properties] + :end-before: [END set_blob_service_properties] + :language: python + :dedent: 12 + :caption: Setting service properties for the blob service. 
+ """ + if all(parameter is None for parameter in [ + analytics_logging, hour_metrics, minute_metrics, cors, + target_version, delete_retention_policy, static_website]): + raise ValueError("set_service_properties should be called with at least one parameter") + + props = StorageServiceProperties( + logging=analytics_logging, + hour_metrics=hour_metrics, + minute_metrics=minute_metrics, + cors=cors, + default_service_version=target_version, + delete_retention_policy=delete_retention_policy, + static_website=static_website + ) + timeout = kwargs.pop('timeout', None) + try: + await self._client.service.set_properties(props, timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_containers( + self, name_starts_with=None, # type: Optional[str] + include_metadata=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> AsyncItemPaged[ContainerProperties] + """Returns a generator to list the containers under the specified account. + + The generator will lazily follow the continuation tokens returned by + the service and stop when all containers have been returned. + + :param str name_starts_with: + Filters the results to return only containers whose names + begin with the specified prefix. + :param bool include_metadata: + Specifies that container metadata to be returned in the response. + The default value is `False`. + :keyword bool include_deleted: + Specifies that deleted containers to be returned in the response. This is for container restore enabled + account. The default value is `False`. + .. versionadded:: 12.4.0 + :keyword int results_per_page: + The maximum number of container names to retrieve per API + call. If the request does not specify the server will return up to 5,000 items. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) of ContainerProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_list_containers] + :end-before: [END bsc_list_containers] + :language: python + :dedent: 16 + :caption: Listing the containers in the blob service. + """ + include = ['metadata'] if include_metadata else [] + include_deleted = kwargs.pop('include_deleted', None) + if include_deleted: + include.append("deleted") + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.service.list_containers_segment, + prefix=name_starts_with, + include=include, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=ContainerPropertiesPaged + ) + + @distributed_trace + def find_blobs_by_tags(self, filter_expression, **kwargs): + # type: (str, **Any) -> AsyncItemPaged[FilteredBlob] + """The Filter Blobs operation enables callers to list blobs across all + containers whose tags match a given search expression. Filter blobs + searches across all containers within a storage account but can be + scoped within the expression to a single container. + + :param str filter_expression: + The expression to find blobs whose tags matches the specified condition. + eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" + To specify a container, eg. 
"@container='containerName' and \"Name\"='C'" + :keyword int results_per_page: + The max result per page when paginating. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob] + """ + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.service.filter_blobs, + where=filter_expression, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=FilteredBlobPaged) + + @distributed_trace_async + async def create_container( + self, name, # type: str + metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[Union[PublicAccess, str]] + **kwargs + ): + # type: (...) -> ContainerClient + """Creates a new container under the specified account. + + If the container with the same name already exists, a ResourceExistsError will + be raised. This method returns a client with which to interact with the newly + created container. + + :param str name: The name of the container to create. + :param metadata: + A dict with name-value pairs to associate with the + container as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + Possible values include: 'container', 'blob'. + :type public_access: str or ~azure.storage.blob.PublicAccess + :keyword container_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + + .. versionadded:: 12.2.0 + + :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.blob.aio.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_create_container] + :end-before: [END bsc_create_container] + :language: python + :dedent: 16 + :caption: Creating a container in the blob service. + """ + container = self.get_container_client(name) + timeout = kwargs.pop('timeout', None) + kwargs.setdefault('merge_span', True) + await container.create_container( + metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) + return container + + @distributed_trace_async + async def delete_container( + self, container, # type: Union[ContainerProperties, str] + lease=None, # type: Optional[Union[BlobLeaseClient, str]] + **kwargs + ): + # type: (...) -> None + """Marks the specified container for deletion. + + The container and any blobs contained within it are later deleted during garbage collection. + If the container is not found, a ResourceNotFoundError will be raised. + + :param container: + The container to delete. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. + Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_delete_container] + :end-before: [END bsc_delete_container] + :language: python + :dedent: 16 + :caption: Deleting a container in the blob service. + """ + container = self.get_container_client(container) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + await container.delete_container( # type: ignore + lease=lease, + timeout=timeout, + **kwargs) + + @distributed_trace_async + async def _rename_container(self, name, new_name, **kwargs): + # type: (str, str, **Any) -> ContainerClient + """Renames a container. + + Operation is successful only if the source container exists. + + :param str name: + The name of the container to rename. + :param str new_name: + The new container name the user wants to rename to. + :keyword lease: + Specify this to perform only if the lease ID given + matches the active lease ID of the source container. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.blob.ContainerClient + """ + renamed_container = self.get_container_client(new_name) + lease = kwargs.pop('lease', None) + try: + kwargs['source_lease_id'] = lease.id # type: str + except AttributeError: + kwargs['source_lease_id'] = lease + try: + await renamed_container._client.container.rename(name, **kwargs) # pylint: disable = protected-access + return renamed_container + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): + # type: (str, str, **Any) -> ContainerClient + """Restores soft-deleted container. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. + + :param str deleted_container_name: + Specifies the name of the deleted container to restore. + :param str deleted_container_version: + Specifies the version of the deleted container to restore. + :keyword str new_name: + The new name for the deleted container to be restored to. + If not specified deleted_container_name will be used as the restored container name. + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :rtype: ~azure.storage.blob.aio.ContainerClient + """ + new_name = kwargs.pop('new_name', None) + container = self.get_container_client(new_name or deleted_container_name) + try: + await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access + deleted_container_version=deleted_container_version, + timeout=kwargs.pop('timeout', None), **kwargs) + return container + except HttpResponseError as error: + process_storage_error(error) + + def get_container_client(self, container): + # type: (Union[ContainerProperties, str]) -> ContainerClient + """Get a client to interact with the specified container. + + The container need not already exist. + + :param container: + The container. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :returns: A ContainerClient. + :rtype: ~azure.storage.blob.aio.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_get_container_client] + :end-before: [END bsc_get_container_client] + :language: python + :dedent: 12 + :caption: Getting the container client to interact with a specific container. + """ + try: + container_name = container.name + except AttributeError: + container_name = container + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ContainerClient( + self.url, container_name=container_name, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function, loop=self._loop) + + def get_blob_client( + self, container, # type: Union[ContainerProperties, str] + blob, # type: Union[BlobProperties, str] + snapshot=None # type: Optional[Union[Dict[str, Any], str]] + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param container: + The container that the blob is in. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param blob: + The blob with which to interact. This can either be the name of the blob, + or an instance of BlobProperties. + :type blob: str or ~azure.storage.blob.BlobProperties + :param snapshot: + The optional blob snapshot on which to operate. This can either be the ID of the snapshot, + or a dictionary output returned by + :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`. + :type snapshot: str or dict(str, Any) + :returns: A BlobClient. + :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_get_blob_client] + :end-before: [END bsc_get_blob_client] + :language: python + :dedent: 16 + :caption: Getting the blob client to interact with a specific blob. 
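+
+        Or, as a short hypothetical snippet (``service_client`` is an assumed,
+        already-constructed BlobServiceClient; the container and blob names are
+        placeholders):
+
+        .. code-block:: python
+
+            blob_client = service_client.get_blob_client("my-container", "my-blob.txt")
+            # Constructing the client makes no request; the blob is only
+            # touched once an operation such as download_blob() is awaited.
+            stream = await blob_client.download_blob()
+            data = await stream.readall()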
+ """ + try: + container_name = container.name + except AttributeError: + container_name = container + + try: + blob_name = blob.name + except AttributeError: + blob_name = blob + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( # type: ignore + self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_container_client_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_container_client_async.py new file mode 100644 index 0000000..93cc877 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_container_client_async.py @@ -0,0 +1,1210 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator, + TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError, ResourceNotFoundError +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.async_paging import AsyncItemPaged +from azure.core.pipeline import AsyncPipeline +from azure.core.pipeline.transport import AsyncHttpResponse + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.policies_async import ExponentialRetry +from .._shared.request_handlers import add_metadata_headers, serialize_iso +from .._shared.response_handlers import ( + process_storage_error, + return_response_headers, + return_headers_and_deserialized) +from .._generated.aio import AzureBlobStorage +from .._generated.models import SignedIdentifier +from .._deserialize import deserialize_container_properties +from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions +from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name +from .._models import ContainerProperties, BlobType, BlobProperties # pylint: disable=unused-import +from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix +from ._lease_async import BlobLeaseClient +from ._blob_client_async import BlobClient + +if TYPE_CHECKING: + from .._models import PublicAccess + from ._download_async import StorageStreamDownloader + from datetime import datetime + from .._models import ( # pylint: disable=unused-import + AccessPolicy, + StandardBlobTier, + PremiumPageBlobTier) + + +class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase): + """A client to interact with a specific container, although that container + may not yet exist. 
+ + For operations relating to a specific blob within this container, a blob client can be + retrieved using the :func:`~get_blob_client` function. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the container, + use the :func:`from_container_url` classmethod. + :param container_name: + The name of the container for the blob. + :type container_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START create_container_client_from_service] + :end-before: [END create_container_client_from_service] + :language: python + :dedent: 8 + :caption: Get a ContainerClient from an existing BlobServiceClient. + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START create_container_client_sasurl] + :end-before: [END create_container_client_sasurl] + :language: python + :dedent: 12 + :caption: Creating the container client directly. + """ + def __init__( + self, account_url, # type: str + container_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(ContainerClient, self).__init__( + account_url, + container_name=container_name, + credential=credential, + **kwargs) + self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) + default_api_version = self._client._config.version # pylint: disable=protected-access + self._client._config.version = get_api_version(kwargs, default_api_version) # pylint: disable=protected-access + self._loop = kwargs.get('loop', None) + + @distributed_trace_async + async def create_container(self, metadata=None, public_access=None, **kwargs): + # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None + """ + Creates a new container under the specified account. If the container + with the same name already exists, the operation fails. + + :param metadata: + A dict with name_value pairs to associate with the + container as metadata. Example:{'Category':'test'} + :type metadata: dict[str, str] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword container_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + + .. versionadded:: 12.2.0 + + :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START create_container] + :end-before: [END create_container] + :language: python + :dedent: 16 + :caption: Creating a container to store blobs. + """ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + timeout = kwargs.pop('timeout', None) + container_cpk_scope_info = get_container_cpk_scope_info(kwargs) + try: + return await self._client.container.create( # type: ignore + timeout=timeout, + access=public_access, + container_cpk_scope_info=container_cpk_scope_info, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def _rename_container(self, new_name, **kwargs): + # type: (str, **Any) -> ContainerClient + """Renames a container. + + Operation is successful only if the source container exists. + + :param str new_name: + The new container name the user wants to rename to. + :keyword lease: + Specify this to perform only if the lease ID given + matches the active lease ID of the source container. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :rtype: ~azure.storage.blob.ContainerClient + """ + lease = kwargs.pop('lease', None) + try: + kwargs['source_lease_id'] = lease.id # type: str + except AttributeError: + kwargs['source_lease_id'] = lease + try: + renamed_container = ContainerClient( + "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + await renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access + return renamed_container + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def delete_container( + self, **kwargs): + # type: (Any) -> None + """ + Marks the specified container for deletion. The container and any blobs + contained within it are later deleted during garbage collection. + + :keyword lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. + Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START delete_container] + :end-before: [END delete_container] + :language: python + :dedent: 16 + :caption: Delete a container. + """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + await self._client.container.delete( + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def acquire_lease( + self, lease_duration=-1, # type: int + lease_id=None, # type: Optional[str] + **kwargs): + # type: (...) -> BlobLeaseClient + """ + Requests a new lease. If the container does not have an active lease, + the Blob service creates a lease on the container and returns a new + lease ID. 
+ + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A BlobLeaseClient object, that can be run in a context manager. + :rtype: ~azure.storage.blob.aio.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START acquire_lease_on_container] + :end-before: [END acquire_lease_on_container] + :language: python + :dedent: 12 + :caption: Acquiring a lease on the container. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) + return lease + + @distributed_trace_async + async def get_account_information(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + """ + try: + return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_container_properties(self, **kwargs): + # type: (**Any) -> ContainerProperties + """Returns all user-defined metadata and system properties for the specified + container. The data returned does not include the container's list of blobs. + + :keyword lease: + If specified, get_container_properties only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Properties for the specified container within a container object. 
+ :rtype: ~azure.storage.blob.ContainerProperties
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START get_container_properties]
+ :end-before: [END get_container_properties]
+ :language: python
+ :dedent: 16
+ :caption: Getting properties on the container.
+ """
+ lease = kwargs.pop('lease', None)
+ access_conditions = get_access_conditions(lease)
+ timeout = kwargs.pop('timeout', None)
+ try:
+ response = await self._client.container.get_properties(
+ timeout=timeout,
+ lease_access_conditions=access_conditions,
+ cls=deserialize_container_properties,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ response.name = self.container_name
+ return response # type: ignore
+
+ @distributed_trace_async
+ async def exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if the container exists, and False otherwise.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: True if the container exists, False otherwise.
+ :rtype: bool
+ """
+ try:
+ await self._client.container.get_properties(**kwargs)
+ return True
+ except HttpResponseError as error:
+ try:
+ process_storage_error(error)
+ except ResourceNotFoundError:
+ return False
+
+ @distributed_trace_async
+ async def set_container_metadata( # type: ignore
+ self, metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets one or more user-defined name-value pairs for the specified
+ container. Each call to this operation replaces all existing metadata
+ attached to the container. To remove all metadata from the container,
+ call this operation with no metadata dict.
+
+ :param metadata:
+ A dict containing name-value pairs to associate with the container as
+ metadata. Example: {'category':'test'}
+ :type metadata: dict[str, str]
+ :keyword lease:
+ If specified, set_container_metadata only succeeds if the
+ container's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Container-updated property dict (Etag and last modified).
+ :rtype: dict[str, str or ~datetime.datetime]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START set_container_metadata]
+ :end-before: [END set_container_metadata]
+ :language: python
+ :dedent: 16
+ :caption: Setting metadata on the container.
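+
+        Or, as a minimal sketch (``container_client`` is assumed to be an
+        existing ContainerClient instance):
+
+        .. code-block:: python
+
+            # Replaces ALL metadata currently set on the container.
+            await container_client.set_container_metadata({"category": "test"})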
+ """ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + return await self._client.container.set_metadata( # type: ignore + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs + # type: (...) -> BlobServiceClient + """Get a client to interact with the container's parent service account. + + Defaults to current container's credentials. + + :returns: A BlobServiceClient. + :rtype: ~azure.storage.blob.BlobServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START get_blob_service_client_from_container_client] + :end-before: [END get_blob_service_client_from_container_client] + :language: python + :dedent: 8 + :caption: Get blob service client from container object. + """ + from ._blob_service_client_async import BlobServiceClient + if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + else: + _pipeline = self._pipeline # pylint: disable = protected-access + return BlobServiceClient( + "{}://{}".format(self.scheme, self.primary_hostname), + credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, + _pipeline=_pipeline) + + + @distributed_trace_async + async def get_container_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the specified container. + The permissions indicate whether container data may be accessed publicly. + + :keyword lease: + If specified, get_container_access_policy only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_container_access_policy] + :end-before: [END get_container_access_policy] + :language: python + :dedent: 16 + :caption: Getting the access policy on the container. 
+ """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = await self._client.container.get_access_policy( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=return_headers_and_deserialized, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return { + 'public_access': response.get('blob_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace_async + async def set_container_access_policy( + self, signed_identifiers, # type: Dict[str, AccessPolicy] + public_access=None, # type: Optional[Union[str, PublicAccess]] + **kwargs # type: Any + ): # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the permissions for the specified container or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether blobs in a container may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the container. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified date/time. + :keyword ~datetime.datetime if_unmodified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Container-updated property dict (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START set_container_access_policy] + :end-before: [END set_container_access_policy] + :language: python + :dedent: 16 + :caption: Setting access policy on the container. + """ + timeout = kwargs.pop('timeout', None) + lease = kwargs.pop('lease', None) + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. 
The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore + signed_identifiers = identifiers # type: ignore + + mod_conditions = get_modify_conditions(kwargs) + access_conditions = get_access_conditions(lease) + try: + return await self._client.container.set_access_policy( + container_acl=signed_identifiers or None, + timeout=timeout, + access=public_access, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_blobs(self, name_starts_with=None, include=None, **kwargs): + # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] or str include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'tags'. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START list_blobs_in_container] + :end-before: [END list_blobs_in_container] + :language: python + :dedent: 12 + :caption: List the blobs in the container. + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_flat_segment, + include=include, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=BlobPropertiesPaged + ) + + @distributed_trace + def walk_blobs( + self, name_starts_with=None, # type: Optional[str] + include=None, # type: Optional[Any] + delimiter="/", # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> AsyncItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. This operation will list blobs in accordance with a hierarchy, + as delimited by the specified delimiter character. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
+ :param str delimiter: + When the request includes this parameter, the operation returns a BlobPrefix + element in the response body that acts as a placeholder for all blobs whose + names begin with the same substring up to the appearance of the delimiter + character. The delimiter may be a single character or a string. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_hierarchy_segment, + delimiter=delimiter, + include=include, + timeout=timeout, + **kwargs) + return BlobPrefix( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + delimiter=delimiter) + + @distributed_trace_async + async def upload_blob( + self, name, # type: Union[str, BlobProperties] + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] + length=None, # type: Optional[int] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> BlobClient + """Creates a new blob from a data source with automatic chunking. + + :param name: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type name: str or ~azure.storage.blob.BlobProperties + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. The exception to the above is with Append + blob types: if set to False and the data already exists, an error will not be raised + and the data will be appended to the existing blob. If set overwrite=True, then the existing + append blob will be deleted, and a new one created. Defaults to False. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. 
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :returns: A BlobClient to interact with the newly uploaded blob. + :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START upload_blob_to_container] + :end-before: [END upload_blob_to_container] + :language: python + :dedent: 12 + :caption: Upload blob to the container. + """ + blob = self.get_blob_client(name) + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + await blob.upload_blob( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + timeout=timeout, + encoding=encoding, + **kwargs + ) + return blob + + @distributed_trace_async + async def delete_blob( + self, blob, # type: Union[str, BlobProperties] + delete_snapshots=None, # type: Optional[str] + **kwargs + ): + # type: (...) -> None + """Marks the specified blob or snapshot for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot + and retains the blob or snapshot for specified number of days. + After specified number of days, blob's data is removed from the service during garbage collection. + Soft deleted blob or snapshot is accessible through :func:`list_blobs()` specifying `include=["deleted"]` + option. Soft-deleted blob or snapshot can be restored using :func:`~BlobClient.undelete()` + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blobs snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword lease: + Required if the blob has an active lease. Value can be a Lease object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. 
``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + blob = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + await blob.delete_blob( # type: ignore + delete_snapshots=delete_snapshots, + timeout=timeout, + **kwargs) + + @distributed_trace_async + async def download_blob(self, blob, offset=None, length=None, **kwargs): + # type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. 
versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: A streaming object. (StorageStreamDownloader) + :rtype: ~azure.storage.blob.aio.StorageStreamDownloader + """ + blob_client = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + return await blob_client.download_blob( + offset=offset, + length=length, + **kwargs) + + @distributed_trace_async + async def delete_blobs( # pylint: disable=arguments-differ + self, *blobs: List[Union[str, BlobProperties, dict]], + **kwargs + ) -> AsyncIterator[AsyncHttpResponse]: + """Marks the specified blobs or snapshots for deletion. + + The blobs are later deleted during garbage collection. + Note that in order to delete blobs, you must delete all of their + snapshots. You can delete both at the same time with the delete_blobs operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots + and retains the blobs or snapshots for specified number of days. + After specified number of days, blobs' data is removed from the service during garbage collection. + Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` + Soft-deleted blobs or snapshots can be restored using :func:`~BlobClient.undelete()` + + :param blobs: + The blobs to delete. This can be a single blob, or multiple values can + be supplied, where each value is either the name of the blob (str) or BlobProperties. + + .. note:: + When the blob type is dict, here's a list of keys, value rules. + + blob name: + key: 'name', value type: str + snapshot you want to delete: + key: 'snapshot', value type: str + whether to delete snapthots when deleting blob: + key: 'delete_snapshots', value: 'include' or 'only' + if the blob modified or not: + key: 'if_modified_since', 'if_unmodified_since', value type: datetime + etag: + key: 'etag', value type: str + match the etag or not: + key: 'match_condition', value type: MatchConditions + tags match condition: + key: 'if_tags_match_condition', value type: str + lease: + key: 'lease_id', value type: Union[str, LeaseClient] + timeout for subrequest: + key: 'timeout', value type: int + + :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] + :keyword str delete_snapshots: + Required if a blob has associated snapshots. Values include: + - "only": Deletes only the blobs snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
+ :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword bool raise_on_any_failure: + This is a boolean param which defaults to True. When this is set, an exception + is raised even if there is a single operation failure. For optimal performance, + this should be set to False + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: An async iterator of responses, one for each blob in order + :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START delete_multiple_blobs] + :end-before: [END delete_multiple_blobs] + :language: python + :dedent: 12 + :caption: Deleting multiple blobs. + """ + if len(blobs) == 0: + return iter(list()) + + reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) + + return await self._batch_send(*reqs, **options) + + @distributed_trace + async def set_standard_blob_tier_blobs( + self, + standard_blob_tier: Union[str, 'StandardBlobTier'], + *blobs: List[Union[str, BlobProperties, dict]], + **kwargs + ) -> AsyncIterator[AsyncHttpResponse]: + """This operation sets the tier on block blobs. + + A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param standard_blob_tier: + Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', + 'Archive'. The hot tier is optimized for storing data that is accessed + frequently. The cool storage tier is optimized for storing data that + is infrequently accessed and stored for at least a month. The archive + tier is optimized for storing data that is rarely accessed and stored + for at least six months with flexible latency requirements. + + .. note:: + If you want to set different tier on different blobs please set this positional parameter to None. + Then the blob tier on every BlobProperties will be taken. + + :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier + :param blobs: + The blobs with which to interact. This can be a single blob, or multiple values can + be supplied, where each value is either the name of the blob (str) or BlobProperties. + + .. note:: + When the blob type is dict, here's a list of keys, value rules. 
+ blob name:
+ key: 'name', value type: str
+ standard blob tier:
+ key: 'blob_tier', value type: StandardBlobTier
+ rehydrate priority:
+ key: 'rehydrate_priority', value type: RehydratePriority
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ tags match condition:
+ key: 'if_tags_match_condition', value type: str
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+ :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+ Indicates the priority with which to rehydrate an archived blob.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword bool raise_on_any_failure:
+ This is a boolean param which defaults to True. When this is set, an exception
+ is raised even if there is a single operation failure. For optimal performance,
+ this should be set to False.
+ :return: An async iterator of responses, one for each blob in order
+ :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse]
+ """
+ reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs)
+
+ return await self._batch_send(*reqs, **options)
+
+ @distributed_trace
+ async def set_premium_page_blob_tier_blobs(
+ self,
+ premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'],
+ *blobs: List[Union[str, BlobProperties, dict]],
+ **kwargs
+ ) -> AsyncIterator[AsyncHttpResponse]:
+ """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts.
+
+ :param premium_page_blob_tier:
+ A page blob tier value to set on all blobs. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+
+ .. note::
+ If you want to set a different tier on different blobs, please set this positional parameter to None.
+ Then the blob tier on every BlobProperties will be taken.
+
+ :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+ :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When the blob type is dict, here's a list of keys, value rules.
+
+ blob name:
+ key: 'name', value type: str
+ premium blob tier:
+ key: 'blob_tier', value type: PremiumPageBlobTier
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :keyword bool raise_on_any_failure:
+ This is a boolean param which defaults to True. When this is set, an exception
+ is raised even if there is a single operation failure. For optimal performance,
+ this should be set to False.
+ :return: An async iterator of responses, one for each blob in order + :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] + """ + reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) + + return await self._batch_send(*reqs, **options) + + def get_blob_client( + self, blob, # type: Union[BlobProperties, str] + snapshot=None # type: str + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param blob: + The blob with which to interact. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`~BlobClient.create_snapshot()`. + :returns: A BlobClient. + :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_blob_client] + :end-before: [END get_blob_client] + :language: python + :dedent: 12 + :caption: Get the blob client. + """ + blob_name = _get_blob_name(blob) + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( + self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function, loop=self._loop) diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_download_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_download_async.py new file mode 100644 index 0000000..1f05309 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_download_async.py @@ -0,0 +1,549 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import asyncio +import sys +from io import BytesIO +from itertools import islice +import warnings +from typing import AsyncIterator + +from aiohttp import ClientPayloadError +from azure.core.exceptions import HttpResponseError, ServiceResponseError +from .._shared.encryption import decrypt_blob +from .._shared.request_handlers import validate_and_format_range_headers +from .._shared.response_handlers import process_storage_error, parse_length_from_content_range +from .._deserialize import get_page_ranges_result +from .._download import process_range_and_offset, _ChunkDownloader + +async def process_content(data, start_offset, end_offset, encryption): + if data is None: + raise ValueError("Response cannot be None.") + content = data.response.body() + if encryption.get('key') is not None or encryption.get('resolver') is not None: + try: + return decrypt_blob( + encryption.get('required'), + encryption.get('key'), + encryption.get('resolver'), + content, + start_offset, + end_offset, + data.response.headers) + except Exception as error: + raise HttpResponseError( + message="Decryption failed.", + response=data.response, + error=error) + return content + + +class _AsyncChunkDownloader(_ChunkDownloader): + def __init__(self, **kwargs): + super(_AsyncChunkDownloader, self).__init__(**kwargs) + self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None + self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None + + async def process_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) + length = chunk_end - chunk_start + if length > 0: + await self._write_to_stream(chunk_data, chunk_start) + await self._update_progress(length) + + async def yield_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + return await self._download_chunk(chunk_start, chunk_end - 1) + + async def _update_progress(self, length): + if self.progress_lock: + async with self.progress_lock: # pylint: disable=not-async-context-manager + self.progress_total += length + else: + self.progress_total += length + + async def _write_to_stream(self, chunk_data, chunk_start): + if self.stream_lock: + async with self.stream_lock: # pylint: disable=not-async-context-manager + self.stream.seek(self.stream_start + (chunk_start - self.start_index)) + self.stream.write(chunk_data) + else: + self.stream.write(chunk_data) + + async def _download_chunk(self, chunk_start, chunk_end): + download_range, offset = process_range_and_offset( + chunk_start, chunk_end, chunk_end, self.encryption_options) + + # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. + # Do optimize and create empty chunk locally if condition is met. 
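+ # _do_optimize returns True only when the requested range of a sparse
+ # page blob is known (from the earlier get_page_ranges call) to hold no
+ # data, so the chunk can be zero-filled locally instead of fetched.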
+ if self._do_optimize(download_range[0], download_range[1]):
+ chunk_data = b"\x00" * self.chunk_size
+ else:
+ range_header, range_validation = validate_and_format_range_headers(
+ download_range[0],
+ download_range[1],
+ check_content_md5=self.validate_content
+ )
+ retry_active = True
+ retry_total = 3
+ while retry_active:
+ try:
+ _, response = await self.client.download(
+ range=range_header,
+ range_get_content_md5=range_validation,
+ validate_content=self.validate_content,
+ data_stream_total=self.total_size,
+ download_stream_current=self.progress_total,
+ **self.request_options
+ )
+ retry_active = False
+
+ except HttpResponseError as error:
+ process_storage_error(error)
+ except ClientPayloadError as error:
+ retry_total -= 1
+ if retry_total <= 0:
+ raise ServiceResponseError(error, error=error)
+ await asyncio.sleep(1)
+
+ chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options)
+
+ # This makes sure that if_match is set so that we can validate
+ # that subsequent downloads are to an unmodified blob
+ if self.request_options.get('modified_access_conditions'):
+ self.request_options['modified_access_conditions'].if_match = response.properties.etag
+
+ return chunk_data
+
+
+class _AsyncChunkIterator(object):
+ """Async iterator for chunks in blob download stream."""
+
+ def __init__(self, size, content, downloader, chunk_size):
+ self.size = size
+ self._chunk_size = chunk_size
+ self._current_content = content
+ self._iter_downloader = downloader
+ self._iter_chunks = None
+ self._complete = (size == 0)
+
+ def __len__(self):
+ return self.size
+
+ def __iter__(self):
+ raise TypeError("Async stream must be iterated asynchronously.")
+
+ def __aiter__(self):
+ return self
+
+ async def __anext__(self):
+ """Iterate through responses."""
+ if self._complete:
+ raise StopAsyncIteration("Download complete")
+ if not self._iter_downloader:
+ # cut the data obtained from initial GET into chunks
+ if len(self._current_content) > self._chunk_size:
+ return self._get_chunk_data()
+ self._complete = True
+ return self._current_content
+
+ if not self._iter_chunks:
+ self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+ # initial GET result still has more than _chunk_size bytes of data
+ if len(self._current_content) >= self._chunk_size:
+ return self._get_chunk_data()
+
+ try:
+ chunk = next(self._iter_chunks)
+ self._current_content += await self._iter_downloader.yield_chunk(chunk)
+ except StopIteration:
+ self._complete = True
+ # it's likely that there is some data left in self._current_content
+ if self._current_content:
+ return self._current_content
+ raise StopAsyncIteration("Download complete")
+
+ return self._get_chunk_data()
+
+ def _get_chunk_data(self):
+ chunk_data = self._current_content[: self._chunk_size]
+ self._current_content = self._current_content[self._chunk_size:]
+ return chunk_data
+
+
+class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes
+ """A streaming object to download from Azure Storage.
+
+ :ivar str name:
+ The name of the blob being downloaded.
+ :ivar str container:
+ The name of the container where the blob is.
+ :ivar ~azure.storage.blob.BlobProperties properties:
+ The properties of the blob being downloaded. If only a range of the data is being
+ downloaded, this will be reflected in the properties.
+ :ivar int size:
+ The size of the total data in the stream. This will be the byte range if specified,
+ otherwise the total size of the blob.
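+
+ A downloader is normally obtained by awaiting ``download_blob()`` rather
+ than constructed directly. A minimal usage sketch (``blob_client`` is an
+ assumed, already-constructed async BlobClient)::
+
+ stream = await blob_client.download_blob()
+ data = await stream.readall()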
+ """ + + def __init__( + self, + clients=None, + config=None, + start_range=None, + end_range=None, + validate_content=None, + encryption_options=None, + max_concurrency=1, + name=None, + container=None, + encoding=None, + **kwargs + ): + self.name = name + self.container = container + self.properties = None + self.size = None + + self._clients = clients + self._config = config + self._start_range = start_range + self._end_range = end_range + self._max_concurrency = max_concurrency + self._encoding = encoding + self._validate_content = validate_content + self._encryption_options = encryption_options or {} + self._request_options = kwargs + self._location_mode = None + self._download_complete = False + self._current_content = None + self._file_size = None + self._non_empty_ranges = None + self._response = None + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. + self._first_get_size = self._config.max_single_get_size if not self._validate_content \ + else self._config.max_chunk_get_size + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, initial_request_end, self._end_range, self._encryption_options + ) + + def __len__(self): + return self.size + + async def _setup(self): + self._response = await self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.container = self.container + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = 'bytes {0}-{1}/{2}'.format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + if self.size == 0: + self._current_content = b"" + else: + self._current_content = await process_content( + self._response, + self._initial_offset[0], + self._initial_offset[1], + self._encryption_options + ) + + async def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content) + + retry_active = True + retry_total = 3 + while retry_active: + try: + location_mode, response = await self._clients.blob.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. 
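+ # On RA-GRS accounts the initial read may have been served by either
+ # the primary or the secondary endpoint; remember which one so that
+ # all subsequent ranged reads go to the same replica.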
+ self._location_mode = location_mode
+
+ # Parse the total file size and adjust the download size if ranges
+ # were specified
+ self._file_size = parse_length_from_content_range(response.properties.content_range)
+ if self._end_range is not None:
+ # Use the length unless it is over the end of the file
+ self.size = min(self._file_size, self._end_range - self._start_range + 1)
+ elif self._start_range is not None:
+ self.size = self._file_size - self._start_range
+ else:
+ self.size = self._file_size
+ retry_active = False
+
+ except HttpResponseError as error:
+ if self._start_range is None and error.response.status_code == 416:
+ # Get range will fail on an empty file. If the user did not
+ # request a range, do a regular get request in order to get
+ # any properties.
+ try:
+ _, response = await self._clients.blob.download(
+ validate_content=self._validate_content,
+ data_stream_total=0,
+ download_stream_current=0,
+ **self._request_options)
+ retry_active = False
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ # Set the download size to empty
+ self.size = 0
+ self._file_size = 0
+ else:
+ process_storage_error(error)
+
+ except ClientPayloadError as error:
+ retry_total -= 1
+ if retry_total <= 0:
+ raise ServiceResponseError(error, error=error)
+ await asyncio.sleep(1)
+
+ # get page ranges to optimize downloading sparse page blob
+ if response.properties.blob_type == 'PageBlob':
+ try:
+ page_ranges = await self._clients.page_blob.get_page_ranges()
+ self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
+ except HttpResponseError:
+ pass
+
+ # If the file is small, the download is complete at this point.
+ # If file size is large, download the rest of the file in chunks.
+ if response.properties.size != self.size:
+ # Lock on the etag. This can be overridden by the user by specifying '*'
+ if self._request_options.get('modified_access_conditions'):
+ if not self._request_options['modified_access_conditions'].if_match:
+ self._request_options['modified_access_conditions'].if_match = response.properties.etag
+ else:
+ self._download_complete = True
+ return response
+
+ def chunks(self):
+ # type: () -> AsyncIterator[bytes]
+ """Iterate over chunks in the download stream.
+
+ :rtype: AsyncIterator[bytes]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+ :start-after: [START download_a_blob_in_chunk]
+ :end-before: [END download_a_blob_in_chunk]
+ :language: python
+ :dedent: 16
+ :caption: Download a blob using chunks().
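+
+ A minimal iteration sketch (``blob_client`` is an assumed async BlobClient)::
+
+ stream = await blob_client.download_blob()
+ async for chunk in stream.chunks():
+ ... # process each block of bytes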
+ """
+ if self.size == 0 or self._download_complete:
+ iter_downloader = None
+ else:
+ data_end = self._file_size
+ if self._end_range is not None:
+ # Use the length unless it is over the end of the file
+ data_end = min(self._file_size, self._end_range + 1)
+ iter_downloader = _AsyncChunkDownloader(
+ client=self._clients.blob,
+ non_empty_ranges=self._non_empty_ranges,
+ total_size=self.size,
+ chunk_size=self._config.max_chunk_get_size,
+ current_progress=self._first_get_size,
+ start_range=self._initial_range[1] + 1, # Start where the first download ended
+ end_range=data_end,
+ stream=None,
+ parallel=False,
+ validate_content=self._validate_content,
+ encryption_options=self._encryption_options,
+ use_location=self._location_mode,
+ **self._request_options)
+ return _AsyncChunkIterator(
+ size=self.size,
+ content=self._current_content,
+ downloader=iter_downloader,
+ chunk_size=self._config.max_chunk_get_size)
+
+ async def readall(self):
+ """Download the contents of this blob.
+
+ This operation is blocking until all data is downloaded.
+
+ :rtype: bytes or str
+ """
+ stream = BytesIO()
+ await self.readinto(stream)
+ data = stream.getvalue()
+ if self._encoding:
+ return data.decode(self._encoding)
+ return data
+
+ async def content_as_bytes(self, max_concurrency=1):
+ """Download the contents of this blob.
+
+ This operation is blocking until all data is downloaded.
+
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :rtype: bytes
+ """
+ warnings.warn(
+ "content_as_bytes is deprecated, use readall instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ return await self.readall()
+
+ async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+ """Download the contents of this blob, and decode as text.
+
+ This operation is blocking until all data is downloaded.
+
+ :param int max_concurrency:
+ The number of parallel connections with which to download.
+ :param str encoding:
+ Text encoding to decode the downloaded bytes. Default is UTF-8.
+ :rtype: str
+ """
+ warnings.warn(
+ "content_as_text is deprecated, use readall instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ self._encoding = encoding
+ return await self.readall()
+
+ async def readinto(self, stream):
+ """Download the contents of this blob to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream. The stream must be seekable if the download
+ uses more than one parallel connection.
+ :returns: The number of bytes read.
+ :rtype: int
+ """
+ # the stream must be seekable if parallel download is required
+ parallel = self._max_concurrency > 1
+ if parallel:
+ error_message = "Target stream handle must be seekable."
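+ # Chunks are written at absolute offsets by concurrent workers, so a
+ # parallel download is only possible when the target stream can seek.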
+ if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(error_message) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(error_message) + + # Write the content to the user stream + stream.write(self._current_content) + if self._download_complete: + return self.size + + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + downloader = _AsyncChunkDownloader( + client=self._clients.blob, + non_empty_ranges=self._non_empty_ranges, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # start where the first download ended + end_range=data_end, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + **self._request_options) + + dl_tasks = downloader.get_chunk_offsets() + running_futures = [ + asyncio.ensure_future(downloader.process_chunk(d)) + for d in islice(dl_tasks, 0, self._max_concurrency) + ] + while running_futures: + # Wait for some download to finish before adding a new one + done, running_futures = await asyncio.wait( + running_futures, return_when=asyncio.FIRST_COMPLETED) + try: + for task in done: + task.result() + except HttpResponseError as error: + process_storage_error(error) + try: + next_chunk = next(dl_tasks) + except StopIteration: + break + else: + running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) + + if running_futures: + # Wait for the remaining downloads to finish + done, _running_futures = await asyncio.wait(running_futures) + try: + for task in done: + task.result() + except HttpResponseError as error: + process_storage_error(error) + return self.size + + async def download_to_stream(self, stream, max_concurrency=1): + """Download the contents of this blob to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :param int max_concurrency: + The number of parallel connections with which to download. + :returns: The properties of the downloaded blob. + :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + await self.readinto(stream) + return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_lease_async.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_lease_async.py new file mode 100644 index 0000000..79e6733 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_lease_async.py @@ -0,0 +1,325 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, + TypeVar, TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator_async import distributed_trace_async + +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._serialize import get_modify_conditions +from .._lease import BlobLeaseClient as LeaseClientBase + +if TYPE_CHECKING: + from datetime import datetime + from .._generated.operations import BlobOperations, ContainerOperations + BlobClient = TypeVar("BlobClient") + ContainerClient = TypeVar("ContainerClient") + + +class BlobLeaseClient(LeaseClientBase): + """Creates a new BlobLeaseClient. + + This client provides lease operations on a BlobClient or ContainerClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the blob or container to lease. + :type client: ~azure.storage.blob.aio.BlobClient or + ~azure.storage.blob.aio.ContainerClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. + """ + + def __enter__(self): + raise TypeError("Async lease must use 'async with'.") + + def __exit__(self, *args): + self.release() + + async def __aenter__(self): + return self + + async def __aexit__(self, *args): + await self.release() + + @distributed_trace_async + async def acquire(self, lease_duration=-1, **kwargs): + # type: (int, Any) -> None + """Requests a new lease. + + If the container does not have an active lease, the Blob service creates a + lease on the container and returns a new lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. 
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = await self._client.acquire_lease(
+ timeout=kwargs.pop('timeout', None),
+ duration=lease_duration,
+ proposed_lease_id=self.id,
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+ self.etag = response.get('etag') # type: str
+
+ @distributed_trace_async
+ async def renew(self, **kwargs):
+ # type: (Any) -> None
+ """Renews the lease.
+
+ The lease can be renewed if the lease ID specified in the
+ lease client matches that associated with the container or blob. Note that
+ the lease may be renewed even if it has expired as long as the container
+ or blob has not been leased again since the expiration of that lease. When you
+ renew a lease, the lease duration clock resets.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = await self._client.renew_lease(
+ lease_id=self.id,
+ timeout=kwargs.pop('timeout', None),
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace_async
+ async def release(self, **kwargs):
+ # type: (Any) -> None
+ """Release the lease.
+
+ The lease may be released if the client lease id specified matches
+ that associated with the container or blob.
Releasing the lease allows another client
+ to immediately acquire the lease for the container or blob as soon as the release is complete.
+
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = await self._client.release_lease(
+ lease_id=self.id,
+ timeout=kwargs.pop('timeout', None),
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace_async
+ async def change(self, proposed_lease_id, **kwargs):
+ # type: (str, Any) -> None
+ """Change the lease ID of an active lease.
+
+ :param str proposed_lease_id:
+ Proposed lease ID, in a GUID string format. The Blob service returns 400
+ (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g.
``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ response = await self._client.change_lease(
+ lease_id=self.id,
+ proposed_lease_id=proposed_lease_id,
+ timeout=kwargs.pop('timeout', None),
+ modified_access_conditions=mod_conditions,
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace_async
+ async def break_lease(self, lease_break_period=None, **kwargs):
+ # type: (Optional[int], Any) -> int
+ """Break the lease, if the container or blob has an active lease.
+
+ Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+ the request is not required to specify a matching lease ID. When a lease
+ is broken, the lease break period is allowed to elapse, during which time
+ no lease operation except break and release can be performed on the container or blob.
+ When a lease is successfully broken, the response indicates the interval
+ in seconds until a new lease can be acquired.
+
+ :param int lease_break_period:
+ This is the proposed duration, in seconds, that the lease
+ should continue before it is broken, between 0 and 60 seconds. This
+ break period is only used if it is shorter than the time remaining
+ on the lease. If longer, the time remaining on the lease is used.
+ A new lease will not be available before the break period has
+ expired, but the lease may be held for longer than the break
+ period. If this header does not appear with a break
+ operation, a fixed-duration lease breaks after the remaining lease
+ period elapses, and an infinite lease breaks immediately.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Approximate time remaining in the lease period, in seconds.
+ :rtype: int + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.break_lease( + timeout=kwargs.pop('timeout', None), + break_period=lease_break_period, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_list_blobs_helper.py new file mode 100644 index 0000000..058572f --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_list_blobs_helper.py @@ -0,0 +1,163 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged +from azure.core.exceptions import HttpResponseError +from .._deserialize import get_blob_properties_from_generated_code +from .._models import BlobProperties +from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix +from .._shared.models import DictMixin +from .._shared.response_handlers import return_context_and_deserialized, process_storage_error + + +class BlobPropertiesPaged(AsyncPageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The container that the blobs are listed from. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
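+
+ Instances are normally produced by ``ContainerClient.list_blobs()`` rather
+ than constructed directly; a usage sketch (``container_client`` assumed)::
+
+ async for blob in container_client.list_blobs():
+ print(blob.name)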
+ """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + delimiter=None, + location_mode=None): + super(BlobPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.delimiter = delimiter + self.current_page = None + self.location_mode = location_mode + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.container = self._response.container_name + self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] + + return self._response.next_marker or None, self.current_page + + def _build_item(self, item): + if isinstance(item, BlobProperties): + return item + if isinstance(item, BlobItemInternal): + blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access + blob.container = self.container + return blob + return item + + +class BlobPrefix(AsyncItemPaged, DictMixin): + """An Iterable of Blob properties. + + Returned from walk_blobs when a delimiter is used. + Can be thought of as a virtual blob directory. + + :ivar str name: The prefix, or "directory name" of the blob. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str marker: The continuation token of the current page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str marker: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
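+
+ A usage sketch (``container_client`` assumed)::
+
+ async for item in container_client.walk_blobs(delimiter='/'):
+ print(item.name) # item may be a BlobPrefix or BlobProperties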
+ """ + def __init__(self, *args, **kwargs): + super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) + self.name = kwargs.get('prefix') + self.prefix = kwargs.get('prefix') + self.results_per_page = kwargs.get('results_per_page') + self.container = kwargs.get('container') + self.delimiter = kwargs.get('delimiter') + self.location_mode = kwargs.get('location_mode') + + +class BlobPrefixPaged(BlobPropertiesPaged): + def __init__(self, *args, **kwargs): + super(BlobPrefixPaged, self).__init__(*args, **kwargs) + self.name = self.prefix + + async def _extract_data_cb(self, get_next_return): + continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) + self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items + self.current_page = [self._build_item(item) for item in self.current_page] + self.delimiter = self._response.delimiter + + return continuation_token, self.current_page + + def _build_item(self, item): + item = super(BlobPrefixPaged, self)._build_item(item) + if isinstance(item, GenBlobPrefix): + return BlobPrefix( + self._command, + container=self.container, + prefix=item.name, + results_per_page=self.results_per_page, + location_mode=self.location_mode) + return item diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_models.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_models.py new file mode 100644 index 0000000..05edd78 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_models.py @@ -0,0 +1,143 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines + +from azure.core.async_paging import AsyncPageIterator +from azure.core.exceptions import HttpResponseError +from .._deserialize import parse_tags + +from .._models import ContainerProperties, FilteredBlob +from .._shared.response_handlers import return_context_and_deserialized, process_storage_error + +from .._generated.models import FilterBlobItem + + +class ContainerPropertiesPaged(AsyncPageIterator): + """An Iterable of Container properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A container name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.ContainerProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only containers whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of container names to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
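+
+ Instances are normally produced by ``BlobServiceClient.list_containers()``;
+ a usage sketch (``service_client`` assumed)::
+
+ async for container in service_client.list_containers():
+ print(container.name)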
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(ContainerPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [self._build_item(item) for item in self._response.container_items] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + return ContainerProperties._from_generated(item) # pylint: disable=protected-access + + +class FilteredBlobPaged(AsyncPageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
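+
+ Instances are normally produced by ``BlobServiceClient.find_blobs_by_tags()``;
+ a usage sketch (``service_client`` and the tag filter are assumed)::
+
+ async for blob in service_client.find_blobs_by_tags("\"project\"='contoso'"):
+ print(blob.name)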
+ """ + def __init__( + self, command, + container=None, + results_per_page=None, + continuation_token=None, + location_mode=None): + super(FilteredBlobPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.marker = continuation_token + self.results_per_page = results_per_page + self.container = container + self.current_page = None + self.location_mode = location_mode + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.marker = self._response.next_marker + self.current_page = [self._build_item(item) for item in self._response.blobs] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + if isinstance(item, FilterBlobItem): + tags = parse_tags(item.tags) + blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) + return blob + return item diff --git a/azure/multiapi/storagev2/blob/v2020_06_12/aio/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_upload_helpers.py new file mode 100644 index 0000000..36d1e44 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_06_12/aio/_upload_helpers.py @@ -0,0 +1,270 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from io import SEEK_SET, UnsupportedOperation +from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import + +import six +from azure.core.exceptions import ResourceModifiedError, HttpResponseError + +from .._shared.response_handlers import ( + process_storage_error, + return_response_headers) +from .._shared.uploads_async import ( + upload_data_chunks, + upload_substream_blocks, + BlockBlobChunkUploader, + PageBlobChunkUploader, + AppendBlobChunkUploader) +from .._shared.encryption import generate_blob_encryption_data, encrypt_blob +from .._generated.models import ( + BlockLookupList, + AppendPositionAccessConditions, + ModifiedAccessConditions, +) +from .._upload_helpers import _convert_mod_error, _any_conditions + +if TYPE_CHECKING: + from datetime import datetime # pylint: disable=unused-import + BlobLeaseClient = TypeVar("BlobLeaseClient") + + +async def upload_block_blob( # pylint: disable=too-many-locals + client=None, + data=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + adjusted_count = length + if (encryption_options.get('key') is not None) and (adjusted_count is not None): + adjusted_count += (16 - (length % 16)) + blob_headers = kwargs.pop('blob_headers', None) + tier = kwargs.pop('standard_blob_tier', None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + # Do single put if the size is smaller than config.max_single_put_size + if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): + try: + data = data.read(length) + if not isinstance(data, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + except AttributeError: + pass + if encryption_options.get('key'): + encryption_data, data = encrypt_blob(data, encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + return await client.upload( + body=data, + content_length=adjusted_count, + blob_http_headers=blob_headers, + headers=headers, + cls=return_response_headers, + validate_content=validate_content, + data_stream_total=adjusted_count, + upload_stream_current=0, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + **kwargs) + + use_original_upload_path = blob_settings.use_byte_buffer or \ + validate_content or encryption_options.get('required') or \ + blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ + hasattr(stream, 'seekable') and not stream.seekable() or \ + not hasattr(stream, 'seek') or not hasattr(stream, 'tell') + + if use_original_upload_path: + if encryption_options.get('key'): + cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + encryption_options['cek'] = cek + encryption_options['vector'] = iv + block_ids = await upload_data_chunks( + service=client, + uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + encryption_options=encryption_options, + headers=headers, + **kwargs + ) + else: + block_ids = await upload_substream_blocks( + 
service=client, + uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + headers=headers, + **kwargs + ) + + block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) + block_lookup.latest = block_ids + return await client.commit_block_list( + block_lookup, + blob_http_headers=blob_headers, + cls=return_response_headers, + validate_content=validate_content, + headers=headers, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + **kwargs) + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +async def upload_page_blob( + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + if length is None or length < 0: + raise ValueError("A content length must be specified for a Page Blob.") + if length % 512 != 0: + raise ValueError("Invalid page blob size: {0}. " + "The size must be aligned to a 512-byte boundary.".format(length)) + if kwargs.get('premium_page_blob_tier'): + premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') + try: + headers['x-ms-access-tier'] = premium_page_blob_tier.value + except AttributeError: + headers['x-ms-access-tier'] = premium_page_blob_tier + if encryption_options and encryption_options.get('data'): + headers['x-ms-meta-encryptiondata'] = encryption_options['data'] + blob_tags_string = kwargs.pop('blob_tags_string', None) + + response = await client.create( + content_length=0, + blob_content_length=length, + blob_sequence_number=None, + blob_http_headers=kwargs.pop('blob_headers', None), + blob_tags_string=blob_tags_string, + cls=return_response_headers, + headers=headers, + **kwargs) + if length == 0: + return response + + kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) + return await upload_data_chunks( + service=client, + uploader_class=PageBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_page_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + encryption_options=encryption_options, + headers=headers, + **kwargs) + + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +async def upload_append_blob( # pylint: disable=unused-argument + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if length == 0: + return {} + blob_headers = kwargs.pop('blob_headers', None) + append_conditions = AppendPositionAccessConditions( + max_size=kwargs.pop('maxsize_condition', None), + append_position=None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + try: + if overwrite: + await client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return await upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + 
chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + headers=headers, + **kwargs) + except HttpResponseError as error: + if error.response.status_code != 404: + raise + # rewind the request body if it is a stream + if hasattr(stream, 'read'): + try: + # attempt to rewind the body to the initial position + stream.seek(0, SEEK_SET) + except UnsupportedOperation: + # if body is not seekable, then retry would not work + raise error + await client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return await upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/__init__.py new file mode 100644 index 0000000..58442ed --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/__init__.py @@ -0,0 +1,239 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import os + +from typing import Union, Iterable, AnyStr, IO, Any, Dict # pylint: disable=unused-import +from ._version import VERSION +from ._blob_client import BlobClient +from ._container_client import ContainerClient +from ._blob_service_client import BlobServiceClient +from ._lease import BlobLeaseClient +from ._download import StorageStreamDownloader +from ._quick_query_helper import BlobQueryReader +from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas +from ._shared.policies import ExponentialRetry, LinearRetry +from ._shared.response_handlers import PartialBatchErrorException +from ._shared.models import( + LocationMode, + ResourceTypes, + AccountSasPermissions, + StorageErrorCode, + UserDelegationKey +) +from ._generated.models import ( + RehydratePriority, +) +from ._models import ( + BlobType, + BlockState, + StandardBlobTier, + PremiumPageBlobTier, + BlobImmutabilityPolicyMode, + SequenceNumberAction, + PublicAccess, + BlobAnalyticsLogging, + Metrics, + RetentionPolicy, + StaticWebsite, + CorsRule, + ContainerProperties, + BlobProperties, + FilteredBlob, + LeaseProperties, + ContentSettings, + CopyProperties, + BlobBlock, + PageRange, + AccessPolicy, + ContainerSasPermissions, + BlobSasPermissions, + CustomerProvidedEncryptionKey, + ContainerEncryptionScope, + BlobQueryError, + DelimitedJsonDialect, + DelimitedTextDialect, + QuickQueryDialect, + ArrowDialect, + ArrowType, + ObjectReplicationPolicy, + ObjectReplicationRule, + ImmutabilityPolicy +) +from ._list_blobs_helper import BlobPrefix + +__version__ = VERSION + + +def upload_blob_to_url( + blob_url, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + credential=None, # type: Any + **kwargs): + # type: (...) -> Dict[str, Any] + """Upload data to a given URL + + The data will be uploaded as a block blob. 
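+
+    For example, a minimal sketch (the account, container, blob and SAS token
+    below are placeholder values):
+
+    .. code-block:: python
+
+        upload_blob_to_url(
+            "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas-token>",
+            b"Hello, world!",
+            overwrite=True)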
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param data:
+        The data to upload. This can be bytes, text, an iterable or a file-like object.
+    :type data: bytes or str or Iterable
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential from azure.core.credentials, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+    :keyword bool overwrite:
+        Whether the blob to be uploaded should overwrite the current data.
+        If True, upload_blob_to_url will overwrite any existing data. If set to False, the
+        operation will fail with a ResourceExistsError.
+    :keyword int max_concurrency:
+        The number of parallel connections with which to upload.
+    :keyword int length:
+        Number of bytes to read from the stream. This is optional, but
+        should be supplied for optimal performance.
+    :keyword dict(str,str) metadata:
+        Name-value pairs associated with the blob as metadata.
+    :keyword bool validate_content:
+        If true, calculates an MD5 hash for each chunk of the blob. The storage
+        service checks the hash of the content that has arrived with the hash
+        that was sent. This is primarily valuable for detecting bitflips on
+        the wire if using http instead of https, as https (the default) will
+        already validate. Note that this MD5 hash is not stored with the
+        blob. Also note that if enabled, the memory-efficient upload algorithm
+        will not be used, because computing the MD5 hash requires buffering
+        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :keyword str encoding:
+        Encoding to use if text is supplied as input. Defaults to UTF-8.
+    :returns: Blob-updated property dict (Etag and last modified)
+    :rtype: dict(str, Any)
+    """
+    with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+        return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs)
+
+
+def _download_to_stream(client, handle, **kwargs):
+    """Download data to specified open file-handle."""
+    stream = client.download_blob(**kwargs)
+    stream.readinto(handle)
+
+
+def download_blob_from_url(
+        blob_url,  # type: str
+        output,  # type: str
+        credential=None,  # type: Any
+        **kwargs):
+    # type: (...) -> None
+    """Download the contents of a blob to a local file or stream.
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param output:
+        Where the data should be downloaded to. This could be either a file path to write to,
+        or an open IO handle to write to.
+    :type output: str or writable stream.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token or the blob is public. The value can be a SAS token string,
+        an instance of an AzureSasCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+ :keyword bool overwrite: + Whether the local file should be overwritten if it already exists. The default value is + `False` - in which case a ValueError will be raised if the file already exists. If set to + `True`, an attempt will be made to write to the existing file. If a stream handle is passed + in, this value is ignored. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :keyword int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :rtype: None + """ + overwrite = kwargs.pop('overwrite', False) + with BlobClient.from_blob_url(blob_url, credential=credential) as client: + if hasattr(output, 'write'): + _download_to_stream(client, output, **kwargs) + else: + if not overwrite and os.path.isfile(output): + raise ValueError("The file '{}' already exists.".format(output)) + with open(output, 'wb') as file_handle: + _download_to_stream(client, file_handle, **kwargs) + + +__all__ = [ + 'upload_blob_to_url', + 'download_blob_from_url', + 'BlobServiceClient', + 'ContainerClient', + 'BlobClient', + 'BlobType', + 'BlobLeaseClient', + 'StorageErrorCode', + 'UserDelegationKey', + 'ExponentialRetry', + 'LinearRetry', + 'LocationMode', + 'BlockState', + 'StandardBlobTier', + 'PremiumPageBlobTier', + 'SequenceNumberAction', + 'BlobImmutabilityPolicyMode', + 'ImmutabilityPolicy', + 'PublicAccess', + 'BlobAnalyticsLogging', + 'Metrics', + 'RetentionPolicy', + 'StaticWebsite', + 'CorsRule', + 'ContainerProperties', + 'BlobProperties', + 'BlobPrefix', + 'FilteredBlob', + 'LeaseProperties', + 'ContentSettings', + 'CopyProperties', + 'BlobBlock', + 'PageRange', + 'AccessPolicy', + 'QuickQueryDialect', + 'ContainerSasPermissions', + 'BlobSasPermissions', + 'ResourceTypes', + 'AccountSasPermissions', + 'StorageStreamDownloader', + 'CustomerProvidedEncryptionKey', + 'RehydratePriority', + 'generate_account_sas', + 'generate_container_sas', + 'generate_blob_sas', + 'PartialBatchErrorException', + 'ContainerEncryptionScope', + 'BlobQueryError', + 'DelimitedJsonDialect', + 'DelimitedTextDialect', + 'ArrowDialect', + 'ArrowType', + 'BlobQueryReader', + 'ObjectReplicationPolicy', + 'ObjectReplicationRule' +] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_blob_client.py b/azure/multiapi/storagev2/blob/v2020_10_02/_blob_client.py new file mode 100644 index 0000000..902c013 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_blob_client.py @@ -0,0 +1,3977 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines,no-self-use +from functools import partial +from io import BytesIO +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, + TYPE_CHECKING +) + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six +from azure.core.pipeline import Pipeline +from azure.core.tracing.decorator import distributed_trace +from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError + +from ._shared import encode_base64 +from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query, TransportWrapper +from ._shared.encryption import generate_blob_encryption_data +from ._shared.uploads import IterStreamer +from ._shared.request_handlers import ( + add_metadata_headers, get_length, read_length, + validate_and_format_range_headers) +from ._shared.response_handlers import return_response_headers, process_storage_error, return_headers_and_deserialized +from ._generated import AzureBlobStorage +from ._generated.models import ( # pylint: disable=unused-import + DeleteSnapshotsOptionType, + BlobHTTPHeaders, + BlockLookupList, + AppendPositionAccessConditions, + SequenceNumberAccessConditions, + QueryRequest, + CpkInfo) +from ._serialize import ( + get_modify_conditions, + get_source_conditions, + get_cpk_scope_info, + get_api_version, + serialize_blob_tags_header, + serialize_blob_tags, + serialize_query_format, get_access_conditions +) +from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream, parse_tags, \ + deserialize_pipeline_response_into_cls +from ._quick_query_helper import BlobQueryReader +from ._upload_helpers import ( + upload_block_blob, + upload_append_blob, + upload_page_blob, _any_conditions) +from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError, QuickQueryDialect, \ + DelimitedJsonDialect, DelimitedTextDialect +from ._download import StorageStreamDownloader +from ._lease import BlobLeaseClient + +if TYPE_CHECKING: + from datetime import datetime + from ._generated.models import BlockList + from ._models import ( # pylint: disable=unused-import + ContentSettings, + PremiumPageBlobTier, + StandardBlobTier, + SequenceNumberAction + ) + +_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( + 'The require_encryption flag is set, but encryption is not supported' + ' for this method.') + + +class BlobClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods + """A client to interact with a specific blob, although that blob may not yet exist. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the blob, + use the :func:`from_blob_url` classmethod. + :param container_name: The container name for the blob. + :type container_name: str + :param blob_name: The name of the blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob_name: str + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. 
This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential from azure.core.credentials, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most
+        recent service version that is compatible with the current SDK.
+        Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory-efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+        the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_authentication.py
+            :start-after: [START create_blob_client]
+            :end-before: [END create_blob_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobClient from a URL to a public blob (no auth needed).
+
+        .. literalinclude:: ../samples/blob_samples_authentication.py
+            :start-after: [START create_blob_client_sas_url]
+            :end-before: [END create_blob_client_sas_url]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobClient from a SAS URL to a blob.
+    """
+    def __init__(
+            self, account_url,  # type: str
+            container_name,  # type: str
+            blob_name,  # type: str
+            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> None
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError:
+            raise ValueError("Account URL must be a string.")
+        parsed_url = urlparse(account_url.rstrip('/'))
+
+        if not (container_name and blob_name):
+            raise ValueError("Please specify a container name and blob name.")
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(account_url))
+
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+
+        self.container_name = container_name
+        self.blob_name = blob_name
+        try:
+            self.snapshot = snapshot.snapshot  # type: ignore
+        except AttributeError:
+            try:
+                self.snapshot = snapshot['snapshot']  # type: ignore
+            except TypeError:
+                self.snapshot = snapshot or path_snapshot
+
+        # This parameter is used for the hierarchy traversal. Give precedence to credential.
+        self._raw_credential = credential if credential else sas_token
+        self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot)
+        super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._client = AzureBlobStorage(self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # pylint: disable=protected-access
+
+    def _format_url(self, hostname):
+        container_name = self.container_name
+        if isinstance(container_name, six.text_type):
+            container_name = container_name.encode('UTF-8')
+        return "{}://{}/{}/{}{}".format(
+            self.scheme,
+            hostname,
+            quote(container_name),
+            quote(self.blob_name, safe='~/'),
+            self._query_str)
+
+    def _encode_source_url(self, source_url):
+        parsed_source_url = urlparse(source_url)
+        source_scheme = parsed_source_url.scheme
+        source_hostname = parsed_source_url.netloc.rstrip('/')
+        source_path = unquote(parsed_source_url.path)
+        source_query = parsed_source_url.query
+        result = ["{}://{}{}".format(source_scheme, source_hostname, quote(source_path, safe='~/'))]
+        if source_query:
+            result.append(source_query)
+        return '?'.join(result)
+
+    @classmethod
+    def from_blob_url(cls, blob_url, credential=None, snapshot=None, **kwargs):
+        # type: (str, Optional[Any], Optional[Union[str, Dict[str, Any]]], Any) -> BlobClient
+        """Create BlobClient from a blob url. Note that this does not support
+        customized blob URLs where the blob name itself contains '/'.
+
+        :param str blob_url:
+            The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be
+            either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
+        :type blob_url: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential from azure.core.credentials, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`. If specified, this will override
+            the snapshot in the url.
+        :returns: A Blob client.
+        :rtype: ~azure.storage.blob.BlobClient
+        """
+        try:
+            if not blob_url.lower().startswith('http'):
+                blob_url = "https://" + blob_url
+        except AttributeError:
+            raise ValueError("Blob URL must be a string.")
+        parsed_url = urlparse(blob_url.rstrip('/'))
+
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(blob_url))
+
+        account_path = ""
+        if ".core." in parsed_url.netloc:
+            # .core. indicates a non-customized url. A blob name with directory info can also be parsed.
+            path_blob = parsed_url.path.lstrip('/').split('/', 1)
+        elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc:
+            path_blob = parsed_url.path.lstrip('/').split('/', 2)
+            account_path += '/' + path_blob[0]
+        else:
+            # for a customized url, a blob name that has directory info cannot be parsed.
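+            # For illustration: "https://mydomain.com/extra/container/blob" splits
+            # into ['extra', 'container', 'blob'] below; the last two segments
+            # become the container and blob names and the rest is re-attached to
+            # the account URL as an account path.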
+            path_blob = parsed_url.path.lstrip('/').split('/')
+            if len(path_blob) > 2:
+                account_path = "/" + "/".join(path_blob[:-2])
+        account_url = "{}://{}{}?{}".format(
+            parsed_url.scheme,
+            parsed_url.netloc.rstrip('/'),
+            account_path,
+            parsed_url.query)
+        container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1])
+        if not container_name or not blob_name:
+            raise ValueError("Invalid URL. Provide a blob_url with a valid blob and container name.")
+
+        path_snapshot, _ = parse_query(parsed_url.query)
+        if snapshot:
+            try:
+                path_snapshot = snapshot.snapshot  # type: ignore
+            except AttributeError:
+                try:
+                    path_snapshot = snapshot['snapshot']  # type: ignore
+                except TypeError:
+                    path_snapshot = snapshot
+
+        return cls(
+            account_url, container_name=container_name, blob_name=blob_name,
+            snapshot=path_snapshot, credential=credential, **kwargs
+        )
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            container_name,  # type: str
+            blob_name,  # type: str
+            snapshot=None,  # type: Optional[str]
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):  # type: (...) -> BlobClient
+        """Create BlobClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param container_name: The container name for the blob.
+        :type container_name: str
+        :param blob_name: The name of the blob with which to interact.
+        :type blob_name: str
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential from azure.core.credentials, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :returns: A Blob client.
+        :rtype: ~azure.storage.blob.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string_blob]
+                :end-before: [END auth_from_connection_string_blob]
+                :language: python
+                :dedent: 8
+                :caption: Creating the BlobClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, container_name=container_name, blob_name=blob_name,
+            snapshot=snapshot, credential=credential, **kwargs
+        )
+
+    @distributed_trace
+    def get_account_information(self, **kwargs):
+        # type: (**Any) -> Dict[str, str]
+        """Gets information related to the storage account in which the blob resides.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+ :rtype: dict(str, str) + """ + try: + return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _upload_blob_options( # pylint:disable=too-many-statements + self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] + length=None, # type: Optional[int] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + encryption_options = { + 'required': self.require_encryption, + 'key': self.key_encryption_key, + 'resolver': self.key_resolver_function, + } + if self.key_encryption_key is not None: + cek, iv, encryption_data = generate_blob_encryption_data(self.key_encryption_key) + encryption_options['cek'] = cek + encryption_options['vector'] = iv + encryption_options['data'] = encryption_data + + encoding = kwargs.pop('encoding', 'UTF-8') + if isinstance(data, six.text_type): + data = data.encode(encoding) # type: ignore + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, 'read'): + stream = data + elif hasattr(data, '__iter__'): + stream = IterStreamer(data, encoding=encoding) + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + + validate_content = kwargs.pop('validate_content', False) + content_settings = kwargs.pop('content_settings', None) + overwrite = kwargs.pop('overwrite', False) + max_concurrency = kwargs.pop('max_concurrency', 1) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + kwargs['cpk_info'] = cpk_info + + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) + kwargs['modified_access_conditions'] = get_modify_conditions(kwargs) + kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs) + if content_settings: + kwargs['blob_headers'] = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None)) + kwargs['stream'] = stream + kwargs['length'] = length + kwargs['overwrite'] = overwrite + kwargs['headers'] = headers + kwargs['validate_content'] = validate_content + kwargs['blob_settings'] = self._config + kwargs['max_concurrency'] = max_concurrency + kwargs['encryption_options'] = encryption_options + + if blob_type == BlobType.BlockBlob: + kwargs['client'] = self._client.block_blob + kwargs['data'] = data + elif blob_type == BlobType.PageBlob: + kwargs['client'] = self._client.page_blob + elif blob_type == BlobType.AppendBlob: + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + 
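+            # NOTE: this guard exists because the client-side encryption scheme
+            # encrypts the payload as a single padded envelope (an assumption
+            # from the shared encryption helpers); independently appended blocks
+            # could not be padded and decrypted consistently.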
kwargs['client'] = self._client.append_blob + else: + raise ValueError("Unsupported BlobType: {}".format(blob_type)) + return kwargs + + def _upload_blob_from_url_options(self, source_url, **kwargs): + # type: (...) -> Dict[str, Any] + tier = kwargs.pop('standard_blob_tier', None) + overwrite = kwargs.pop('overwrite', False) + content_settings = kwargs.pop('content_settings', None) + source_authorization = kwargs.pop('source_authorization', None) + if content_settings: + kwargs['blob_http_headers'] = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=None, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'copy_source_authorization': source_authorization, + 'content_length': 0, + 'copy_source_blob_properties': kwargs.pop('include_source_blob_properties', True), + 'source_content_md5': kwargs.pop('source_content_md5', None), + 'copy_source': source_url, + 'modified_access_conditions': get_modify_conditions(kwargs), + 'blob_tags_string': serialize_blob_tags_header(kwargs.pop('tags', None)), + 'cls': return_response_headers, + 'lease_access_conditions': get_access_conditions(kwargs.pop('destination_lease', None)), + 'tier': tier.value if tier else None, + 'source_modified_access_conditions': get_source_conditions(kwargs), + 'cpk_info': cpk_info, + 'cpk_scope_info': get_cpk_scope_info(kwargs) + } + options.update(kwargs) + if not overwrite and not _any_conditions(**options): # pylint: disable=protected-access + options['modified_access_conditions'].if_none_match = '*' + return options + + @distributed_trace + def upload_blob_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Dict[str, Any] + """ + Creates a new Block Blob where the content of the blob is read from a given URL. + The content of an existing blob is overwritten with the new blob. + + :param str source_url: + A URL of up to 2 KB in length that specifies a file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. + :keyword bool include_source_blob_properties: + Indicates if properties from the source blob should be copied. Defaults to True. + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. 
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + :paramtype tags: dict(str, str) + :keyword bytearray source_content_md5: + Specify the md5 that is used to verify the integrity of the source bytes. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. 
+ :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. + """ + options = self._upload_blob_from_url_options( + source_url=self._encode_source_url(source_url), + **kwargs) + try: + return self._client.block_blob.put_blob_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def upload_blob( # pylint: disable=too-many-locals + self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] + length=None, # type: Optional[int] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Any + """Creates a new blob from a data source with automatic chunking. + + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. The exception to the above is with Append + blob types: if set to False and the data already exists, an error will not be raised + and the data will be appended to the existing blob. If set overwrite=True, then the existing + append blob will be deleted, and a new one created. Defaults to False. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. 
Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, upload_blob only succeeds if the + blob's lease is active and matches this ID. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + Currently this parameter of upload_blob() API is for BlockBlob only. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + Currently this parameter of upload_blob() API is for BlockBlob only. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. 
+ As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START upload_a_blob] + :end-before: [END upload_a_blob] + :language: python + :dedent: 12 + :caption: Upload a blob to the container. + """ + options = self._upload_blob_options( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + **kwargs) + if blob_type == BlobType.BlockBlob: + return upload_block_blob(**options) + if blob_type == BlobType.PageBlob: + return upload_page_blob(**options) + return upload_append_blob(**options) + + def _download_blob_options(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], **Any) -> Dict[str, Any] + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + if length is not None: + length = offset + length - 1 # Service actually uses an end-range inclusive index + + validate_content = kwargs.pop('validate_content', False) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'clients': self._client, + 'config': self._config, + 'start_range': offset, + 'end_range': length, + 'version_id': kwargs.pop('version_id', None), + 'validate_content': validate_content, + 'encryption_options': { + 'required': self.require_encryption, + 'key': self.key_encryption_key, + 'resolver': self.key_resolver_function}, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'cls': kwargs.pop('cls', None) or deserialize_blob_stream, + 'max_concurrency':kwargs.pop('max_concurrency', 1), + 'encoding': kwargs.pop('encoding', None), + 'timeout': kwargs.pop('timeout', None), + 'name': self.blob_name, + 'container': self.container_name} + options.update(kwargs) + return options + + @distributed_trace + def download_blob(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], **Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. 
Using chunks() returns an iterator which allows the user to iterate over the content in chunks. + + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. 
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.blob.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world.py
+                :start-after: [START download_a_blob]
+                :end-before: [END download_a_blob]
+                :language: python
+                :dedent: 12
+                :caption: Download a blob.
+        """
+        options = self._download_blob_options(
+            offset=offset,
+            length=length,
+            **kwargs)
+        return StorageStreamDownloader(**options)
+
+    def _quick_query_options(self, query_expression,
+                             **kwargs):
+        # type: (str, **Any) -> Dict[str, Any]
+        delimiter = '\n'
+        input_format = kwargs.pop('blob_format', None)
+        if input_format == QuickQueryDialect.DelimitedJson:
+            input_format = DelimitedJsonDialect()
+        if input_format == QuickQueryDialect.DelimitedText:
+            input_format = DelimitedTextDialect()
+        input_parquet_format = input_format == "ParquetDialect"
+        if input_format and not input_parquet_format:
+            try:
+                delimiter = input_format.lineterminator
+            except AttributeError:
+                try:
+                    delimiter = input_format.delimiter
+                except AttributeError:
+                    raise ValueError("The type of blob_format can only be DelimitedTextDialect or "
+                                     "DelimitedJsonDialect or ParquetDialect")
+        output_format = kwargs.pop('output_format', None)
+        if output_format == QuickQueryDialect.DelimitedJson:
+            output_format = DelimitedJsonDialect()
+        if output_format == QuickQueryDialect.DelimitedText:
+            output_format = DelimitedTextDialect()
+        if output_format:
+            if output_format == "ParquetDialect":
+                raise ValueError("ParquetDialect is invalid as an output format.")
+            try:
+                delimiter = output_format.lineterminator
+            except AttributeError:
+                try:
+                    delimiter = output_format.delimiter
+                except AttributeError:
+                    pass
+        else:
+            output_format = input_format if not input_parquet_format else None
+        query_request = QueryRequest(
+            expression=query_expression,
+            input_serialization=serialize_query_format(input_format),
+            output_serialization=serialize_query_format(output_format)
+        )
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(
+                encryption_key=cpk.key_value,
+                encryption_key_sha256=cpk.key_hash,
+                encryption_algorithm=cpk.algorithm
+            )
+        options = {
+            'query_request': query_request,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cpk_info': cpk_info,
+            'snapshot': self.snapshot,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_headers_and_deserialized,
+        }
+        options.update(kwargs)
+        return options, delimiter
+
+    @distributed_trace
+    def query_blob(self, query_expression, **kwargs):
+        # type: (str, **Any) -> BlobQueryReader
+        """Enables users to select/project on blob or blob snapshot data by providing simple query expressions.
+        This operation returns a BlobQueryReader; use readall() or readinto() to retrieve the query data.
+
+        :param str query_expression:
+            Required. A query statement.
+        :keyword Callable[~azure.storage.blob.BlobQueryError] on_error:
+            A function to be called on any processing errors returned by the service.
+        :keyword blob_format:
+            Optional. Defines the serialization of the data currently stored in the blob. The default is to
+            treat the blob data as CSV data formatted in the default dialect. This can be overridden with
+            a custom DelimitedTextDialect, or DelimitedJsonDialect or "ParquetDialect" (passed as a string or enum).
+            These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string.
+        :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect
+            or ~azure.storage.blob.QuickQueryDialect or str
+        :keyword output_format:
+            Optional. Defines the output serialization for the data stream. By default the data will be returned
+            as it is represented in the blob (Parquet formats default to DelimitedTextDialect).
+            By providing an output format, the blob data will be reformatted according to that profile.
+            This value can be a DelimitedTextDialect or a DelimitedJsonDialect or ArrowDialect.
+            These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string.
+        :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect
+            or list[~azure.storage.blob.ArrowDialect] or ~azure.storage.blob.QuickQueryDialect or str
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A streaming object (BlobQueryReader)
+        :rtype: ~azure.storage.blob.BlobQueryReader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_query.py
+                :start-after: [START query]
+                :end-before: [END query]
+                :language: python
+                :dedent: 4
+                :caption: select/project on blob or blob snapshot data by providing simple query expressions.
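+
+        A minimal inline sketch (``blob_client`` is a hypothetical client pointing
+        at a CSV-formatted blob):
+
+        .. code-block:: python
+
+            reader = blob_client.query_blob("SELECT * from BlobStorage")
+            csv_data = reader.readall()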
+ """ + errors = kwargs.pop("on_error", None) + error_cls = kwargs.pop("error_cls", BlobQueryError) + encoding = kwargs.pop("encoding", None) + options, delimiter = self._quick_query_options(query_expression, **kwargs) + try: + headers, raw_response_body = self._client.blob.query(**options) + except HttpResponseError as error: + process_storage_error(error) + return BlobQueryReader( + name=self.blob_name, + container=self.container_name, + errors=errors, + record_delimiter=delimiter, + encoding=encoding, + headers=headers, + response=raw_response_body, + error_cls=error_cls) + + @staticmethod + def _generic_delete_blob_options(delete_snapshots=None, **kwargs): + # type: (str, **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if delete_snapshots: + delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots) + options = { + 'timeout': kwargs.pop('timeout', None), + 'snapshot': kwargs.pop('snapshot', None), # this is added for delete_blobs + 'delete_snapshots': delete_snapshots or None, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions} + options.update(kwargs) + return options + + def _delete_blob_options(self, delete_snapshots=None, **kwargs): + # type: (str, **Any) -> Dict[str, Any] + if self.snapshot and delete_snapshots: + raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.") + options = self._generic_delete_blob_options(delete_snapshots, **kwargs) + options['snapshot'] = self.snapshot + options['version_id'] = kwargs.pop('version_id', None) + options['blob_delete_type'] = kwargs.pop('blob_delete_type', None) + return options + + @distributed_trace + def delete_blob(self, delete_snapshots=None, **kwargs): + # type: (str, **Any) -> None + """Marks the specified blob for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob() + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob + and retains the blob for a specified number of days. + After the specified number of days, the blob's data is removed from the service during garbage collection. + Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` + option. Soft-deleted blob can be restored using :func:`undelete` operation. + + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blobs snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword lease: + Required if the blob has an active lease. If specified, delete_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START delete_blob] + :end-before: [END delete_blob] + :language: python + :dedent: 12 + :caption: Delete a blob. + """ + options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) + try: + self._client.blob.delete(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def undelete_blob(self, **kwargs): + # type: (**Any) -> None + """Restores soft-deleted blobs or snapshots. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START undelete_blob] + :end-before: [END undelete_blob] + :language: python + :dedent: 8 + :caption: Undeleting a blob. + """ + try: + self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace() + def exists(self, **kwargs): + # type: (**Any) -> bool + """ + Returns True if a blob exists with the defined parameters, and returns + False otherwise. + + :kwarg str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to check if it exists. + :kwarg int timeout: + The timeout parameter is expressed in seconds. + :returns: boolean + """ + try: + self._client.blob.get_properties( + snapshot=self.snapshot, + **kwargs) + return True + # Encrypted with CPK + except ResourceExistsError: + return True + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceNotFoundError: + return False + + @distributed_trace + def get_blob_properties(self, **kwargs): + # type: (**Any) -> BlobProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. 
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to get properties. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: BlobProperties + :rtype: ~azure.storage.blob.BlobProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] + :language: python + :dedent: 8 + :caption: Getting the properties for a blob. 
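+
+        As an inline illustration, a minimal sketch (assuming an existing
+        ``BlobClient`` named ``blob_client``; the name is an illustrative assumption):
+
+        .. code-block:: python
+
+            # Fetch metadata and system properties; the blob content is not downloaded.
+            props = blob_client.get_blob_properties()
+            print(props.name, props.size, props.last_modified)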
+ """ + # TODO: extract this out as _get_blob_properties_options + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + try: + cls_method = kwargs.pop('cls', None) + if cls_method: + kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) + blob_props = self._client.blob.get_properties( + timeout=kwargs.pop('timeout', None), + version_id=kwargs.pop('version_id', None), + snapshot=self.snapshot, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=kwargs.pop('cls', None) or deserialize_blob_properties, + cpk_info=cpk_info, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + blob_props.name = self.blob_name + if isinstance(blob_props, BlobProperties): + blob_props.container = self.container_name + blob_props.snapshot = self.snapshot + return blob_props # type: ignore + + def _set_http_headers_options(self, content_settings=None, **kwargs): + # type: (Optional[ContentSettings], **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + blob_headers = None + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + options = { + 'timeout': kwargs.pop('timeout', None), + 'blob_http_headers': blob_headers, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def set_http_headers(self, content_settings=None, **kwargs): + # type: (Optional[ContentSettings], **Any) -> None + """Sets system properties on the blob. + + If one property is set for the content_settings, all properties will be overridden. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: Dict[str, Any] + """ + options = self._set_http_headers_options(content_settings=content_settings, **kwargs) + try: + return self._client.blob.set_http_headers(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _set_blob_metadata_options(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def set_blob_metadata(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] + """Sets user-defined metadata for the blob as one or more name-value pairs. + + :param metadata: + Dict containing name and value pairs. Each call to this operation + replaces all existing metadata attached to the blob. To remove all + metadata from the blob, call this operation with no metadata headers. + :type metadata: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). 
Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag and last modified)
+        """
+        options = self._set_blob_metadata_options(metadata=metadata, **kwargs)
+        try:
+            return self._client.blob.set_metadata(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_immutability_policy(self, immutability_policy, **kwargs):
+        # type: (ImmutabilityPolicy, **Any) -> Dict[str, str]
+        """The Set Immutability Policy operation sets the immutability policy on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :param ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: Dict[str, str]
+        """
+
+        kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
+        kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
+        return self._client.blob.set_immutability_policy(cls=return_response_headers, **kwargs)
+
+    @distributed_trace
+    def delete_immutability_policy(self, **kwargs):
+        # type: (**Any) -> None
+        """The Delete Immutability Policy operation deletes the immutability policy on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+        """
+
+        self._client.blob.delete_immutability_policy(**kwargs)
+
+    @distributed_trace
+    def set_legal_hold(self, legal_hold, **kwargs):
+        # type: (bool, **Any) -> Dict[str, Union[str, datetime, bool]]
+        """The Set Legal Hold operation sets a legal hold on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :param bool legal_hold:
+            Specifies whether a legal hold should be set on the blob.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag, last modified, legal hold).
+ :rtype: Dict[str, Union[str, datetime, bool]] + """ + + return self._client.blob.set_legal_hold(legal_hold, cls=return_response_headers, **kwargs) + + def _create_page_blob_options( # type: ignore + self, size, # type: int + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + blob_headers = None + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + + sequence_number = kwargs.pop('sequence_number', None) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + immutability_policy = kwargs.pop('immutability_policy', None) + if immutability_policy: + kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time + kwargs['immutability_policy_mode'] = immutability_policy.policy_mode + + if premium_page_blob_tier: + try: + headers['x-ms-access-tier'] = premium_page_blob_tier.value # type: ignore + except AttributeError: + headers['x-ms-access-tier'] = premium_page_blob_tier # type: ignore + + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + options = { + 'content_length': 0, + 'blob_content_length': size, + 'blob_sequence_number': sequence_number, + 'blob_http_headers': blob_headers, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'blob_tags_string': blob_tags_string, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def create_page_blob( # type: ignore + self, size, # type: int + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Creates a new Page Blob of the specified size. + + :param int size: + This specifies the maximum size for the page blob, up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. 
+        :type metadata: dict(str, str)
+        :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword int sequence_number:
+            Only for Page blobs. The sequence number is a user-controlled value that you can use to
+            track requests. The value of the sequence number must be between 0
+            and 2^63 - 1. The default value is 0.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specifies whether a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict[str, Any]
+        """
+        options = self._create_page_blob_options(
+            size,
+            content_settings=content_settings,
+            metadata=metadata,
+            premium_page_blob_tier=premium_page_blob_tier,
+            **kwargs)
+        try:
+            return self._client.page_blob.create(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs):
+        # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        cpk_scope_info = get_cpk_scope_info(kwargs)
+        blob_headers = None
+        if content_settings:
+            blob_headers = BlobHTTPHeaders(
+                blob_cache_control=content_settings.cache_control,
+                blob_content_type=content_settings.content_type,
+                blob_content_md5=content_settings.content_md5,
+                blob_content_encoding=content_settings.content_encoding,
+                blob_content_language=content_settings.content_language,
+                blob_content_disposition=content_settings.content_disposition
+            )
+
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+
+        immutability_policy = kwargs.pop('immutability_policy', None)
+        if immutability_policy:
+            kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
+            kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
+
+        blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None))
+
+        options = {
+            'content_length': 0,
+            'blob_http_headers': blob_headers,
+            'timeout': kwargs.pop('timeout', None),
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cpk_scope_info': cpk_scope_info,
+            'cpk_info': cpk_info,
+            'blob_tags_string': blob_tags_string,
+            'cls': return_response_headers,
+            'headers': headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def create_append_blob(self, content_settings=None, metadata=None, **kwargs):
+        # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]]
+        """Creates a new Append Blob.
+
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specifies whether a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict[str, Any] + """ + options = self._create_append_blob_options( + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return self._client.append_blob.create(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _create_snapshot_options(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def create_snapshot(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] + """Creates a snapshot of the blob. + + A snapshot is a read-only version of a blob that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a blob as it appears at a moment in time. + + A snapshot of a blob has the same name as the base blob from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. 
+ Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START create_blob_snapshot] + :end-before: [END create_blob_snapshot] + :language: python + :dedent: 8 + :caption: Create a snapshot of the blob. + """ + options = self._create_snapshot_options(metadata=metadata, **kwargs) + try: + return self._client.blob.create_snapshot(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs): + # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + if 'source_lease' in kwargs: + source_lease = kwargs.pop('source_lease') + try: + headers['x-ms-source-lease-id'] = source_lease.id # type: str + except AttributeError: + headers['x-ms-source-lease-id'] = source_lease + + tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None) + requires_sync = kwargs.pop('requires_sync', None) + source_authorization = kwargs.pop('source_authorization', None) + if source_authorization and incremental_copy: + raise ValueError("Source authorization tokens are not applicable for incremental copying.") + if requires_sync is True: + headers['x-ms-requires-sync'] = str(requires_sync) + if source_authorization: + headers['x-ms-copy-source-authorization'] = source_authorization + else: + if source_authorization: + raise ValueError("Source authorization tokens are only applicable for synchronous copy operations.") + timeout = kwargs.pop('timeout', None) + dest_mod_conditions = get_modify_conditions(kwargs) + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + immutability_policy = kwargs.pop('immutability_policy', None) + if immutability_policy: + kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time + kwargs['immutability_policy_mode'] = immutability_policy.policy_mode + + options = { + 'copy_source': source_url, + 'seal_blob': kwargs.pop('seal_destination_blob', None), + 'timeout': timeout, + 'modified_access_conditions': dest_mod_conditions, + 'blob_tags_string': blob_tags_string, + 'headers': headers, + 'cls': return_response_headers, + } + if not incremental_copy: + source_mod_conditions = get_source_conditions(kwargs) + dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None)) + options['source_modified_access_conditions'] = source_mod_conditions + options['lease_access_conditions'] = dest_access_conditions + options['tier'] = tier.value if tier else None + options.update(kwargs) + return options + + @distributed_trace + def start_copy_from_url(self, source_url, 
metadata=None, incremental_copy=False, **kwargs):
+        # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]]
+        """Copies a blob asynchronously.
+
+        This operation returns a dictionary of copy properties that can be used
+        to check the status of the copy operation, or to abort it.
+        The Blob service copies blobs on a best-effort basis.
+
+        The source blob for a copy operation may be a block blob, an append blob,
+        or a page blob. If the destination blob already exists, it must be of the
+        same blob type as the source blob. Any existing destination blob will be
+        overwritten. The destination blob cannot be modified while a copy operation
+        is in progress.
+
+        When copying from a page blob, the Blob service creates a destination page
+        blob of the source blob's length, initially containing all zeroes. Then
+        the source page ranges are enumerated, and non-empty ranges are copied.
+
+        For a block blob or an append blob, the Blob service creates a committed
+        blob of zero length before returning from this operation. When copying
+        from a block blob, all committed blocks and their block IDs are copied.
+        Uncommitted blocks are not copied. At the end of the copy operation, the
+        destination blob will have the same committed block count as the source.
+
+        When copying from an append blob, all committed blocks are copied. At the
+        end of the copy operation, the destination blob will have the same committed
+        block count as the source.
+
+        For all blob types, you can poll the status of the copy operation by calling
+        get_blob_properties() on the destination blob and inspecting its copy
+        properties. The final blob will be committed when the copy completes.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies a file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=
+
+            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+        :param metadata:
+            Name-value pairs associated with the blob as metadata. If no name-value
+            pairs are specified, the operation will copy the metadata from the
+            source blob or file to the destination blob. If one or more name-value
+            pairs are specified, the destination blob is created with the specified
+            metadata, and metadata is not copied from the source blob or file.
+        :type metadata: dict(str, str)
+        :param bool incremental_copy:
+            Copies the snapshot of the source page blob to a destination page blob.
+            The snapshot is copied such that only the differential changes between
+            the previously copied snapshot and the source are transferred to the destination.
+            The copied snapshots are complete copies of the original snapshot and
+            can be read or copied from as usual. Defaults to False.
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specifies whether a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source
+            blob has been modified since the specified date/time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source blob
+            has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only
+            if the destination blob has been modified since the specified date/time.
+            If the destination blob has not been modified, the Blob service returns
+            status code 412 (Precondition Failed).
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only
+            if the destination blob has not been modified since the specified
+            date/time. If the destination blob has been modified, the Blob service
+            returns status code 412 (Precondition Failed).
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword destination_lease:
+            The lease ID specified for this header must match the lease ID of the
+            destination blob. If the request does not include the lease ID or it is not
+            valid, the operation fails with status code 412 (Precondition Failed).
+        :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword source_lease:
+            Specify this to perform the Copy Blob operation only if
+            the lease ID given matches the active lease ID of the source blob.
+ :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword bool seal_destination_blob: + Seal the destination append blob. This operation is only for append blob. + + .. versionadded:: 12.4.0 + + :keyword bool requires_sync: + Enforces that the service will not return a response until the copy is complete. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. This option is only available when `incremental_copy` is + set to False and `requires_sync` is set to True. + :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). + :rtype: dict[str, Union[str, ~datetime.datetime]] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START copy_blob_from_url] + :end-before: [END copy_blob_from_url] + :language: python + :dedent: 12 + :caption: Copy a blob from a URL. + """ + options = self._start_copy_from_url_options( + source_url=self._encode_source_url(source_url), + metadata=metadata, + incremental_copy=incremental_copy, + **kwargs) + try: + if incremental_copy: + return self._client.page_blob.copy_incremental(**options) + return self._client.blob.start_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + def _abort_copy_options(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + try: + copy_id = copy_id.copy.id + except AttributeError: + try: + copy_id = copy_id['copy_id'] + except TypeError: + pass + options = { + 'copy_id': copy_id, + 'lease_access_conditions': access_conditions, + 'timeout': kwargs.pop('timeout', None)} + options.update(kwargs) + return options + + @distributed_trace + def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination blob with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID string, or an + instance of BlobProperties. + :type copy_id: str or ~azure.storage.blob.BlobProperties + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START abort_copy_blob_from_url] + :end-before: [END abort_copy_blob_from_url] + :language: python + :dedent: 12 + :caption: Abort copying a blob from URL. 
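+
+        As an inline illustration, a minimal sketch (assuming an existing destination
+        ``BlobClient`` named ``dest_blob`` with a copy already started; the name is an
+        illustrative assumption):
+
+        .. code-block:: python
+
+            # Look up the pending copy on the destination blob and abort it.
+            props = dest_blob.get_blob_properties()
+            if props.copy.status == 'pending':
+                dest_blob.abort_copy(props)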
+ """ + options = self._abort_copy_options(copy_id, **kwargs) + try: + self._client.blob.abort_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): + # type: (int, Optional[str], **Any) -> BlobLeaseClient + """Requests a new lease. + + If the blob does not have an active lease, the Blob + Service creates a lease on the blob and returns a new lease. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A BlobLeaseClient object. + :rtype: ~azure.storage.blob.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START acquire_lease_on_blob] + :end-before: [END acquire_lease_on_blob] + :language: python + :dedent: 8 + :caption: Acquiring a lease on a blob. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + lease.acquire(lease_duration=lease_duration, **kwargs) + return lease + + @distributed_trace + def set_standard_blob_tier(self, standard_blob_tier, **kwargs): + # type: (Union[str, StandardBlobTier], Any) -> None + """This operation sets the tier on a block blob. + + A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param standard_blob_tier: + Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', + 'Archive'. The hot tier is optimized for storing data that is accessed + frequently. The cool storage tier is optimized for storing data that + is infrequently accessed and stored for at least a month. The archive + tier is optimized for storing data that is rarely accessed and stored + for at least six months with flexible latency requirements. 
+        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob on which to set the tier.
+
+            .. versionadded:: 12.4.0
+            This keyword argument was introduced in API version '2019-12-12'.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :rtype: None
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        if standard_blob_tier is None:
+            raise ValueError("A StandardBlobTier must be specified")
+        if self.snapshot and kwargs.get('version_id'):
+            raise ValueError("Snapshot and version_id cannot be set at the same time")
+        try:
+            self._client.blob.set_tier(
+                tier=standard_blob_tier,
+                snapshot=self.snapshot,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                lease_access_conditions=access_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _stage_block_options(
+            self, block_id,  # type: str
+            data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
+            length=None,  # type: Optional[int]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        block_id = encode_base64(str(block_id))
+        if isinstance(data, six.text_type):
+            data = data.encode(kwargs.pop('encoding', 'UTF-8'))  # type: ignore
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        if length is None:
+            length = get_length(data)
+            if length is None:
+                length, data = read_length(data)
+        if isinstance(data, bytes):
+            data = data[:length]
+
+        validate_content = kwargs.pop('validate_content', False)
+        cpk_scope_info = get_cpk_scope_info(kwargs)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+
+        options = {
+            'block_id': block_id,
+            'content_length': length,
+            'body': data,
+            'transactional_content_md5': None,
+            'timeout': kwargs.pop('timeout', None),
+            'lease_access_conditions': access_conditions,
+            'validate_content': validate_content,
+            'cpk_scope_info': cpk_scope_info,
+            'cpk_info': cpk_info,
+            'cls': return_response_headers,
+        }
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def stage_block(
+            self, block_id,  # type: str
+            data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
+            length=None,  # type: Optional[int]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        """Creates a new block to be committed as part of a blob.
+
+        :param str block_id: A string value that identifies the block.
+            The string should be less than or equal to 64 bytes in size.
+ For a given blob, the block_id must be the same size for each block. + :param data: The blob data. + :param int length: Size of the block. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str encoding: + Defaults to UTF-8. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob property dict. + :rtype: dict[str, Any] + """ + options = self._stage_block_options( + block_id, + data, + length=length, + **kwargs) + try: + return self._client.block_blob.stage_block(**options) + except HttpResponseError as error: + process_storage_error(error) + + def _stage_block_from_url_options( + self, block_id, # type: str + source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + source_content_md5=None, # type: Optional[Union[bytes, bytearray]] + **kwargs + ): + # type: (...) 
-> Dict[str, Any] + source_authorization = kwargs.pop('source_authorization', None) + if source_length is not None and source_offset is None: + raise ValueError("Source offset value must not be None if length is set.") + if source_length is not None: + source_length = source_offset + source_length - 1 + block_id = encode_base64(str(block_id)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + range_header = None + if source_offset is not None: + range_header, _ = validate_and_format_range_headers(source_offset, source_length) + + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'copy_source_authorization': source_authorization, + 'block_id': block_id, + 'content_length': 0, + 'source_url': source_url, + 'source_range': range_header, + 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + } + options.update(kwargs) + return options + + @distributed_trace + def stage_block_from_url( + self, block_id, # type: Union[str, int] + source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + source_content_md5=None, # type: Optional[Union[bytes, bytearray]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Creates a new block to be committed as part of a blob where + the contents are read from a URL. + + :param str block_id: A string value that identifies the block. + The string should be less than or equal to 64 bytes in size. + For a given blob, the block_id must be the same size for each block. + :param str source_url: The URL. + :param int source_offset: + Start of byte range to use for the block. + Must be set if source length is provided. + :param int source_length: The size of the block in bytes. + :param bytearray source_content_md5: + Specify the md5 calculated for the range of + bytes that must be read from the copy source. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. 
+ :returns: Blob property dict. + :rtype: dict[str, Any] + """ + options = self._stage_block_from_url_options( + block_id, + source_url=self._encode_source_url(source_url), + source_offset=source_offset, + source_length=source_length, + source_content_md5=source_content_md5, + **kwargs) + try: + return self._client.block_blob.stage_block_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + def _get_block_list_result(self, blocks): + # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]] + committed = [] # type: List + uncommitted = [] # type: List + if blocks.committed_blocks: + committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access + if blocks.uncommitted_blocks: + uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access + return committed, uncommitted + + @distributed_trace + def get_block_list(self, block_list_type="committed", **kwargs): + # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]] + """The Get Block List operation retrieves the list of blocks that have + been uploaded as part of a block blob. + + :param str block_list_type: + Specifies whether to return the list of committed + blocks, the list of uncommitted blocks, or both lists together. + Possible values include: 'committed', 'uncommitted', 'all' + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A tuple of two lists - committed and uncommitted blocks + :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + try: + blocks = self._client.block_blob.get_block_list( + list_type=block_list_type, + snapshot=self.snapshot, + timeout=kwargs.pop('timeout', None), + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return self._get_block_list_result(blocks) + + def _commit_block_list_options( # type: ignore + self, block_list, # type: List[BlobBlock] + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) 
-> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
+        for block in block_list:
+            try:
+                if block.state.value == 'committed':
+                    block_lookup.committed.append(encode_base64(str(block.id)))
+                elif block.state.value == 'uncommitted':
+                    block_lookup.uncommitted.append(encode_base64(str(block.id)))
+                else:
+                    block_lookup.latest.append(encode_base64(str(block.id)))
+            except AttributeError:
+                block_lookup.latest.append(encode_base64(str(block)))
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        blob_headers = None
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        if content_settings:
+            blob_headers = BlobHTTPHeaders(
+                blob_cache_control=content_settings.cache_control,
+                blob_content_type=content_settings.content_type,
+                blob_content_md5=content_settings.content_md5,
+                blob_content_encoding=content_settings.content_encoding,
+                blob_content_language=content_settings.content_language,
+                blob_content_disposition=content_settings.content_disposition
+            )
+
+        validate_content = kwargs.pop('validate_content', False)
+        cpk_scope_info = get_cpk_scope_info(kwargs)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+
+        immutability_policy = kwargs.pop('immutability_policy', None)
+        if immutability_policy:
+            kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
+            kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
+
+        tier = kwargs.pop('standard_blob_tier', None)
+        blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None))
+
+        options = {
+            'blocks': block_lookup,
+            'blob_http_headers': blob_headers,
+            'lease_access_conditions': access_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'modified_access_conditions': mod_conditions,
+            'cls': return_response_headers,
+            'validate_content': validate_content,
+            'cpk_scope_info': cpk_scope_info,
+            'cpk_info': cpk_info,
+            'tier': tier.value if tier else None,
+            'blob_tags_string': blob_tags_string,
+            'headers': headers
+        }
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def commit_block_list(  # type: ignore
+            self, block_list,  # type: List[BlobBlock]
+            content_settings=None,  # type: Optional[ContentSettings]
+            metadata=None,  # type: Optional[Dict[str, str]]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """The Commit Block List operation writes a blob by specifying the list of
+        block IDs that make up the blob.
+
+        :param list block_list:
+            List of :class:`~azure.storage.blob.BlobBlock` objects identifying the blocks to commit.
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict[str, str]
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. 
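A short stage-then-commit sketch of the operation documented here; the connection string and names are placeholders.

.. code-block:: python

    # Sketch: upload two blocks individually, then commit them in order.
    from azure.storage.blob import BlobBlock, BlobClient, ContentSettings

    blob = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "assembled.bin")  # placeholders
    blob.stage_block("block-000001", b"first chunk")
    blob.stage_block("block-000002", b"second chunk")
    blob.commit_block_list(
        [BlobBlock(block_id="block-000001"), BlobBlock(block_id="block-000002")],
        content_settings=ContentSettings(content_type="application/octet-stream"),
    )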
+ :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._commit_block_list_options( + block_list, + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return self._client.block_blob.commit_block_list(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): + # type: (Union[str, PremiumPageBlobTier], **Any) -> None + """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. + + :param premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if premium_page_blob_tier is None: + raise ValueError("A PremiumPageBlobTier must be specified") + try: + self._client.blob.set_tier( + tier=premium_page_blob_tier, + timeout=kwargs.pop('timeout', None), + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + def _set_blob_tags_options(self, tags=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + tags = serialize_blob_tags(tags) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + options = { + 'tags': tags, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def set_blob_tags(self, tags=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. + Each call to this operation replaces all existing tags attached to the blob. To remove all + tags from the blob, call this operation with no tags set. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. + + :param tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. 
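A minimal set/get round trip for blob tags, assuming a blob that already exists; the connection string and names are placeholders.

.. code-block:: python

    # Sketch: replace the tag set, read it back, then update conditionally.
    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "tagged.bin")  # placeholders
    blob.set_blob_tags({"project": "alpha", "stage": "raw"})
    tags = blob.get_blob_tags()  # {'project': 'alpha', 'stage': 'raw'}
    blob.set_blob_tags({"project": "alpha", "stage": "curated"},
                       if_tags_match_condition="\"stage\"='raw'")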
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+        :type tags: dict(str, str)
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to add tags to.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the tags content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default),
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on destination blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
+        """
+        options = self._set_blob_tags_options(tags=tags, **kwargs)
+        try:
+            return self._client.blob.set_tags(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_blob_tags_options(self, **kwargs):
+        # type: (**Any) -> Dict[str, Any]
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+
+        options = {
+            'version_id': kwargs.pop('version_id', None),
+            'snapshot': self.snapshot,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_headers_and_deserialized}
+        return options
+
+    @distributed_trace
+    def get_blob_tags(self, **kwargs):
+        # type: (**Any) -> Dict[str, str]
+        """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to get tags for.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on destination blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Key value pairs of blob tags.
+        :rtype: Dict[str, str]
+        """
+        options = self._get_blob_tags_options(**kwargs)
+        try:
+            _, tags = self._client.blob.get_tags(**options)
+            return parse_tags(tags)  # pylint: disable=protected-access
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_page_ranges_options(  # type: ignore
+            self, offset=None,  # type: Optional[int]
+            length=None,  # type: Optional[int]
+            previous_snapshot_diff=None,  # type: Optional[Union[str, Dict[str, Any]]]
+            **kwargs
+        ):
+        # type: (...)
-> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + if length is not None: + length = offset + length - 1 # Reformat to an inclusive range index + page_range, _ = validate_and_format_range_headers( + offset, length, start_range_required=False, end_range_required=False, align_to_page=True + ) + options = { + 'snapshot': self.snapshot, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'timeout': kwargs.pop('timeout', None), + 'range': page_range} + if previous_snapshot_diff: + try: + options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore + except AttributeError: + try: + options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore + except TypeError: + options['prevsnapshot'] = previous_snapshot_diff + options.update(kwargs) + return options + + @distributed_trace + def get_page_ranges( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] + **kwargs + ): + # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + """Returns the list of valid page ranges for a Page Blob or snapshot + of a page blob. + + :param int offset: + Start of byte range to use for getting valid page ranges. + If no length is given, all bytes after the offset will be searched. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for getting valid page ranges. + If length is given, offset must be provided. + This range will return valid page ranges from the offset start up to + the specified length. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param str previous_snapshot_diff: + The snapshot diff parameter that contains an opaque DateTime value that + specifies a previous blob snapshot to be compared + against a more recent snapshot or the current blob. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. 
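A sketch of reading page ranges from an existing page blob (placeholders as above); the call returns the filled and cleared ranges as two lists.

.. code-block:: python

    # Sketch: list filled and cleared 512-byte-aligned ranges.
    from azure.storage.blob import BlobClient

    page_blob = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "disk.vhd")  # placeholders
    filled, cleared = page_blob.get_page_ranges(offset=0, length=1024 * 1024)
    for page_range in filled:
        print("filled:", page_range["start"], "-", page_range["end"])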
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled page ranges, the second element is the cleared page ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        options = self._get_page_ranges_options(
+            offset=offset,
+            length=length,
+            previous_snapshot_diff=previous_snapshot_diff,
+            **kwargs)
+        try:
+            if previous_snapshot_diff:
+                ranges = self._client.page_blob.get_page_ranges_diff(**options)
+            else:
+                ranges = self._client.page_blob.get_page_ranges(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
+
+    @distributed_trace
+    def get_page_range_diff_for_managed_disk(
+            self, previous_snapshot_url,  # type: str
+            offset=None,  # type: Optional[int]
+            length=None,  # type: Optional[int]
+            **kwargs
+        ):
+        # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+        """Returns the list of valid page ranges for a managed disk or snapshot.
+
+        .. note::
+            This operation is only available for managed disk accounts.
+
+        .. versionadded:: 12.2.0
+            This operation was introduced in API version '2019-07-07'.
+
+        :param str previous_snapshot_url:
+            Specifies the URL of a previous snapshot of the managed disk.
+            The response will only contain pages that were changed between the target blob and
+            its previous snapshot.
+        :param int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :param int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled page ranges, the second element is the cleared page ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        options = self._get_page_ranges_options(
+            offset=offset,
+            length=length,
+            prev_snapshot_url=previous_snapshot_url,
+            **kwargs)
+        try:
+            ranges = self._client.page_blob.get_page_ranges_diff(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
+
+    def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs):
+        # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any]
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        if sequence_number_action is None:
+            raise ValueError("A sequence number action must be specified")
+        options = {
+            'sequence_number_action': sequence_number_action,
+            'timeout': kwargs.pop('timeout', None),
+            'blob_sequence_number': sequence_number,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs):
+        # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]]
+        """Sets the blob sequence number.
+
+        :param str sequence_number_action:
+            This property indicates how the service should modify the blob's sequence
+            number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
+        :param str sequence_number:
+            This property sets the blob's sequence number. The sequence number is a
+            user-controlled property that you can use to track requests and manage
+            concurrency issues.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            ..
versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._set_sequence_number_options( + sequence_number_action, sequence_number=sequence_number, **kwargs) + try: + return self._client.page_blob.update_sequence_number(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _resize_blob_options(self, size, **kwargs): + # type: (int, **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if size is None: + raise ValueError("A content length must be specified for a Page Blob.") + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'blob_content_length': size, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def resize_blob(self, size, **kwargs): + # type: (int, **Any) -> Dict[str, Union[str, datetime]] + """Resizes a page blob to the specified size. + + If the specified value is less than the current size of the blob, + then all pages above the specified value are cleared. + + :param int size: + Size used to resize blob. Maximum size for a page blob is up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. 
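A sketch combining the two page-blob maintenance calls above, ``resize_blob`` and ``set_sequence_number`` (placeholders as before).

.. code-block:: python

    # Sketch: grow a page blob to 4 MiB, then bump its sequence number.
    from azure.storage.blob import BlobClient, SequenceNumberAction

    page_blob = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "disk.vhd")  # placeholders
    page_blob.resize_blob(4 * 1024 * 1024)  # size must be 512-byte aligned
    page_blob.set_sequence_number(SequenceNumberAction.Increment)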
+ :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._resize_blob_options(size, **kwargs) + try: + return self._client.page_blob.resize(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _upload_page_options( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + if isinstance(page, six.text_type): + page = page.encode(kwargs.pop('encoding', 'UTF-8')) + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + if offset is None or offset % 512 != 0: + raise ValueError("offset must be an integer that aligns with 512 page size") + if length is None or length % 512 != 0: + raise ValueError("length must be an integer that aligns with 512 page size") + end_range = offset + length - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + seq_conditions = SequenceNumberAccessConditions( + if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), + if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), + if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) + ) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + validate_content = kwargs.pop('validate_content', False) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'body': page[:length], + 'content_length': length, + 'transactional_content_md5': None, + 'timeout': kwargs.pop('timeout', None), + 'range': content_range, + 'lease_access_conditions': access_conditions, + 'sequence_number_access_conditions': seq_conditions, + 'modified_access_conditions': mod_conditions, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def upload_page( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """The Upload Pages operation writes a range of pages to a page blob. + + :param bytes page: + Content of the page. + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. 
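A minimal ``upload_page`` sketch; offsets and lengths must be 512-byte aligned, and the names below are placeholders.

.. code-block:: python

    # Sketch: create a small page blob and write its first 512-byte page.
    import os
    from azure.storage.blob import BlobClient

    page_blob = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "disk.vhd")  # placeholders
    page_blob.create_page_blob(size=4096)  # page blobs are pre-sized
    page_blob.upload_page(os.urandom(512), offset=0, length=512)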
The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._upload_page_options( + page=page, + offset=offset, + length=length, + **kwargs) + try: + return self._client.page_blob.upload_pages(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _upload_pages_from_url_options( # type: ignore + self, source_url, # type: str + offset, # type: int + length, # type: int + source_offset, # type: int + **kwargs + ): + # type: (...) 
-> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+        # TODO: extract the code to a method format_range
+        if offset is None or offset % 512 != 0:
+            raise ValueError("offset must be an integer that aligns with 512 page size")
+        if length is None or length % 512 != 0:
+            raise ValueError("length must be an integer that aligns with 512 page size")
+        if source_offset is None or source_offset % 512 != 0:
+            raise ValueError("source_offset must be an integer that aligns with 512 page size")
+
+        # Format ranges; end offsets are inclusive, hence the -1.
+        end_range = offset + length - 1
+        destination_range = 'bytes={0}-{1}'.format(offset, end_range)
+        source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1)
+
+        seq_conditions = SequenceNumberAccessConditions(
+            if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
+            if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
+            if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
+        )
+        source_authorization = kwargs.pop('source_authorization', None)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        source_mod_conditions = get_source_conditions(kwargs)
+        cpk_scope_info = get_cpk_scope_info(kwargs)
+        source_content_md5 = kwargs.pop('source_content_md5', None)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+
+        options = {
+            'copy_source_authorization': source_authorization,
+            'source_url': source_url,
+            'content_length': 0,
+            'source_range': source_range,
+            'range': destination_range,
+            'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None,
+            'timeout': kwargs.pop('timeout', None),
+            'lease_access_conditions': access_conditions,
+            'sequence_number_access_conditions': seq_conditions,
+            'modified_access_conditions': mod_conditions,
+            'source_modified_access_conditions': source_mod_conditions,
+            'cpk_scope_info': cpk_scope_info,
+            'cpk_info': cpk_info,
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def upload_pages_from_url(self, source_url,  # type: str
+                              offset,  # type: int
+                              length,  # type: int
+                              source_offset,  # type: int
+                              **kwargs
+                              ):
+        # type: (...) -> Dict[str, Any]
+        """
+        The Upload Pages operation writes a range of pages to a page blob where
+        the contents are read from a URL.
+
+        :param str source_url:
+            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+            shared access signature attached.
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+ The service will read the same number of bytes as the destination range (length-offset). + :keyword bytes source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. 
+ As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. + """ + options = self._upload_pages_from_url_options( + source_url=self._encode_source_url(source_url), + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return self._client.page_blob.upload_pages_from_url(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _clear_page_options(self, offset, length, **kwargs): + # type: (int, int, **Any) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + seq_conditions = SequenceNumberAccessConditions( + if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), + if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), + if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) + ) + mod_conditions = get_modify_conditions(kwargs) + if offset is None or offset % 512 != 0: + raise ValueError("offset must be an integer that aligns with 512 page size") + if length is None or length % 512 != 0: + raise ValueError("length must be an integer that aligns with 512 page size") + end_range = length + offset - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'content_length': 0, + 'timeout': kwargs.pop('timeout', None), + 'range': content_range, + 'lease_access_conditions': access_conditions, + 'sequence_number_access_conditions': seq_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def clear_page(self, offset, length, **kwargs): + # type: (int, int, **Any) -> Dict[str, Union[str, datetime]] + """Clears a range of pages. + + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. 
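A sketch pairing ``upload_pages_from_url`` with ``clear_page`` on a destination page blob; the URLs, names, and SAS tokens are placeholders.

.. code-block:: python

    # Sketch: server-side copy one 4 KiB page range, then zero out another.
    from azure.storage.blob import BlobClient

    dest = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "disk-copy.vhd")  # placeholders
    dest.create_page_blob(size=8192)
    dest.upload_pages_from_url(
        source_url="https://srcaccount.blob.core.windows.net/src/disk.vhd?<sas>",
        offset=0, length=4096, source_offset=0)
    dest.clear_page(offset=4096, length=4096)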
Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._clear_page_options(offset, length, **kwargs) + try: + return self._client.page_blob.clear_pages(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _append_block_options( # type: ignore + self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) 
-> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + if isinstance(data, six.text_type): + data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore + if length is None: + length = get_length(data) + if length is None: + length, data = read_length(data) + if length == 0: + return {} + if isinstance(data, bytes): + data = data[:length] + + appendpos_condition = kwargs.pop('appendpos_condition', None) + maxsize_condition = kwargs.pop('maxsize_condition', None) + validate_content = kwargs.pop('validate_content', False) + append_conditions = None + if maxsize_condition or appendpos_condition is not None: + append_conditions = AppendPositionAccessConditions( + max_size=maxsize_condition, + append_position=appendpos_condition + ) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'body': data, + 'content_length': length, + 'timeout': kwargs.pop('timeout', None), + 'transactional_content_md5': None, + 'lease_access_conditions': access_conditions, + 'append_position_access_conditions': append_conditions, + 'modified_access_conditions': mod_conditions, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def append_block( # type: ignore + self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """Commits a new block of data to the end of the existing append blob. + + :param data: + Content of the block. This can be bytes, text, an iterable or a file-like object. + :type data: bytes or str or Iterable + :param int length: + Size of the block in bytes. + :keyword bool validate_content: + If true, calculates an MD5 hash of the block content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. 
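A minimal ``append_block`` sketch (names are placeholders); each call lands at the current end of the blob, and the ``maxsize_condition``/``appendpos_condition`` keywords described here can guard concurrent writers.

.. code-block:: python

    # Sketch: append log records; the service serializes the appends.
    from azure.storage.blob import BlobClient

    log_blob = BlobClient.from_connection_string(
        "<connection-string>", "logs", "app.log")  # placeholders
    log_blob.create_append_blob()
    for record in ("started\n", "working\n", "done\n"):
        log_blob.append_block(record.encode("utf-8"))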
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
+        """
+        options = self._append_block_options(
+            data,
+            length=length,
+            **kwargs
+        )
+        try:
+            return self._client.append_blob.append_block(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _append_block_from_url_options(  # type: ignore
+            self, copy_source_url,  # type: str
+            source_offset=None,  # type: Optional[int]
+            source_length=None,  # type: Optional[int]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+        # If end range is provided, start range must be provided
+        if source_length is not None and source_offset is None:
+            raise ValueError("source_offset should also be specified if source_length is specified")
+        # Format based on whether length is present
+        source_range = None
+        if source_length is not None:
+            end_range = source_offset + source_length - 1
+            source_range = 'bytes={0}-{1}'.format(source_offset, end_range)
+        elif source_offset is not None:
+            source_range = "bytes={0}-".format(source_offset)
+
+        appendpos_condition = kwargs.pop('appendpos_condition', None)
+        maxsize_condition = kwargs.pop('maxsize_condition', None)
+        source_content_md5 = kwargs.pop('source_content_md5', None)
+        append_conditions = None
+        if maxsize_condition is not None or appendpos_condition is not None:
+            append_conditions = AppendPositionAccessConditions(
+                max_size=maxsize_condition,
+                append_position=appendpos_condition
+            )
+        source_authorization = kwargs.pop('source_authorization', None)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        source_mod_conditions = get_source_conditions(kwargs)
+        cpk_scope_info = get_cpk_scope_info(kwargs)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+
+        options = {
+            'copy_source_authorization': source_authorization,
+            'source_url': copy_source_url,
+            'content_length': 0,
+            'source_range': source_range,
+            'source_content_md5': source_content_md5,
+            'transactional_content_md5': None,
+            'lease_access_conditions': access_conditions,
+            'append_position_access_conditions': append_conditions,
+            'modified_access_conditions': mod_conditions,
+            'source_modified_access_conditions': source_mod_conditions,
+            'cpk_scope_info': cpk_scope_info,
+            'cpk_info': cpk_info,
+            'cls': return_response_headers,
+            'timeout': kwargs.pop('timeout', None)}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def append_block_from_url(self, copy_source_url,  # type: str
+                              source_offset=None,  # type: Optional[int]
+                              source_length=None,  # type: Optional[int]
+                              **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime, int]]
+        """
+        Creates a new block to be committed as part of a blob, where the contents are read from a source url.
+
+        :param str copy_source_url:
+            The URL of the source data. It can point to any Azure Blob or File, that is either public or has a
+            shared access signature attached.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+        :param int source_length:
+            This indicates the length of the range of bytes that has to be taken from the copy source.
+        :keyword bytearray source_content_md5:
+            If given, the service will calculate the MD5 hash of the block content and compare against this value.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the
+            AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the source resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the source resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
+        """
+        options = self._append_block_from_url_options(
+            copy_source_url=self._encode_source_url(copy_source_url),
+            source_offset=source_offset,
+            source_length=source_length,
+            **kwargs
+        )
+        try:
+            return self._client.append_blob.append_block_from_url(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _seal_append_blob_options(self, **kwargs):
+        # type: (...) -> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+        appendpos_condition = kwargs.pop('appendpos_condition', None)
+        append_conditions = None
+        if appendpos_condition is not None:
+            append_conditions = AppendPositionAccessConditions(
+                append_position=appendpos_condition
+            )
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+
+        options = {
+            'timeout': kwargs.pop('timeout', None),
+            'lease_access_conditions': access_conditions,
+            'append_position_access_conditions': append_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def seal_append_blob(self, **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime, int]]
+        """The Seal operation seals the Append Blob to make it read-only.
+
+        .. versionadded:: 12.4.0
+
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
+        """
+        options = self._seal_append_blob_options(**kwargs)
+        try:
+            return self._client.append_blob.seal(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def _get_container_client(self):  # pylint: disable=client-method-missing-kwargs
+        # type: (...) -> ContainerClient
+        """Get a client to interact with the blob's parent container.
+
+        The container need not already exist. Defaults to current blob's credentials.
+
+        :returns: A ContainerClient.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START get_container_client_from_blob_client]
+                :end-before: [END get_container_client_from_blob_client]
+                :language: python
+                :dedent: 8
+                :caption: Get container client from blob object.
+        """
+        from ._container_client import ContainerClient
+        if not isinstance(self._pipeline._transport, TransportWrapper):  # pylint: disable = protected-access
+            _pipeline = Pipeline(
+                transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+                policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+            )
+        else:
+            _pipeline = self._pipeline  # pylint: disable = protected-access
+        return ContainerClient(
+            "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name,
+            credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_blob_service_client.py b/azure/multiapi/storagev2/blob/v2020_10_02/_blob_service_client.py
new file mode 100644
index 0000000..33b6120
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_10_02/_blob_service_client.py
@@ -0,0 +1,731 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import functools
+import warnings
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List,
+    TYPE_CHECKING
+)
+
+
+try:
+    from urllib.parse import urlparse
+except ImportError:
+    from urlparse import urlparse  # type: ignore
+
+from azure.core.paging import ItemPaged
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import Pipeline
+from azure.core.tracing.decorator import distributed_trace
+
+from ._shared.models import LocationMode
+from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query
+from ._shared.parser import _to_utc_datetime
+from ._shared.response_handlers import return_response_headers, process_storage_error, \
+    parse_to_internal_user_delegation_key
+from ._generated import AzureBlobStorage
+from ._generated.models import StorageServiceProperties, KeyInfo
+from ._container_client import ContainerClient
+from ._blob_client import BlobClient
+from ._models import ContainerPropertiesPaged
+from ._list_blobs_helper import FilteredBlobPaged
+from ._serialize import get_api_version
+from ._deserialize import service_stats_deserialize, service_properties_deserialize
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from ._shared.models import UserDelegationKey
+    from ._lease import BlobLeaseClient
+    from ._models import (
+        ContainerProperties,
+        BlobProperties,
+        PublicAccess,
+        BlobAnalyticsLogging,
+        Metrics,
+        CorsRule,
+        RetentionPolicy,
+        StaticWebsite,
+        FilteredBlob
+    )
+
+
+class BlobServiceClient(StorageAccountHostsMixin):
+    """A client to interact with the Blob Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete containers within the account.
+    For operations relating to a specific container or blob, clients for those entities
+    can also be retrieved using the `get_client` functions.
+
+    For more optional configuration, please click
+    `here `_.
+
+    :param str account_url:
+        The URL to the blob storage account. Any other entities included
+        in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential from azure.core.credentials, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is '2019-07-07'.
+        Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+        the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_authentication.py
+            :start-after: [START create_blob_service_client]
+            :end-before: [END create_blob_service_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobServiceClient with account url and credential.
+
+        .. literalinclude:: ../samples/blob_samples_authentication.py
+            :start-after: [START create_blob_service_client_oauth]
+            :end-before: [END create_blob_service_client_oauth]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobServiceClient with Azure Identity credentials.
+    """
+
+    def __init__(
+            self, account_url,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> None
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError:
+            raise ValueError("Account URL must be a string.")
+        parsed_url = urlparse(account_url.rstrip('/'))
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(account_url))
+
+        _, sas_token = parse_query(parsed_url.query)
+        self._query_str, credential = self._format_query_string(sas_token, credential)
+        super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._client = AzureBlobStorage(self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # pylint: disable=protected-access
+
+    def _format_url(self, hostname):
+        """Format the endpoint URL according to the current location
+        mode hostname.
+        """
+        return "{}://{}/{}".format(self.scheme, hostname, self._query_str)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):  # type: (...) -> BlobServiceClient
+        """Create BlobServiceClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of a AzureSasCredential from azure.core.credentials, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :returns: A Blob service client.
+        :rtype: ~azure.storage.blob.BlobServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string]
+                :end-before: [END auth_from_connection_string]
+                :language: python
+                :dedent: 8
+                :caption: Creating the BlobServiceClient from a connection string.
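+
+        As orientation only, a minimal sketch of a call (the connection string
+        value below is a placeholder, not a working credential):
+
+        .. code-block:: python
+
+            # Hypothetical connection string; substitute your storage account's own value.
+            conn_str = "DefaultEndpointsProtocol=https;AccountName=<name>;AccountKey=<key>"
+            service = BlobServiceClient.from_connection_string(conn_str)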
+ """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls(account_url, credential=credential, **kwargs) + + @distributed_trace + def get_user_delegation_key(self, key_start_time, # type: datetime + key_expiry_time, # type: datetime + **kwargs # type: Any + ): + # type: (...) -> UserDelegationKey + """ + Obtain a user delegation key for the purpose of signing SAS tokens. + A token credential must be present on the service object for this request to succeed. + + :param ~datetime.datetime key_start_time: + A DateTime value. Indicates when the key becomes valid. + :param ~datetime.datetime key_expiry_time: + A DateTime value. Indicates when the key stops being valid. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The user delegation key. + :rtype: ~azure.storage.blob.UserDelegationKey + """ + key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) + timeout = kwargs.pop('timeout', None) + try: + user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info, + timeout=timeout, + **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore + + @distributed_trace + def get_account_information(self, **kwargs): + # type: (Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START get_blob_service_account_info] + :end-before: [END get_blob_service_account_info] + :language: python + :dedent: 8 + :caption: Getting account information for the blob service. + """ + try: + return self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_service_stats(self, **kwargs): + # type: (**Any) -> Dict[str, Any] + """Retrieves statistics related to replication for the Blob service. + + It is only available when read-access geo-redundant replication is enabled for + the storage account. + + With geo-redundant replication, Azure Storage maintains your data durable + in two locations. In both locations, Azure Storage constantly maintains + multiple healthy replicas of your data. The location where you read, + create, update, or delete data is the primary storage account location. + The primary location exists in the region you choose at the time you + create an account via the Azure Management Azure classic portal, for + example, North Central US. The location to which your data is replicated + is the secondary location. The secondary location is automatically + determined based on the location of the primary; it is in a second data + center that resides in the same region as the primary location. Read-only + access is available from the secondary location, if read-access geo-redundant + replication is enabled for your storage account. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The blob service stats. 
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_stats]
+                :end-before: [END get_blob_service_stats]
+                :language: python
+                :dedent: 8
+                :caption: Getting service stats for the blob service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = self._client.service.get_statistics(  # type: ignore
+                timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
+            return service_stats_deserialize(stats)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_service_properties(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An object containing blob service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_properties]
+                :end-before: [END get_blob_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Getting service properties for the blob service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            service_props = self._client.service.get_properties(timeout=timeout, **kwargs)
+            return service_properties_deserialize(service_props)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_service_properties(
+            self, analytics_logging=None,  # type: Optional[BlobAnalyticsLogging]
+            hour_metrics=None,  # type: Optional[Metrics]
+            minute_metrics=None,  # type: Optional[Metrics]
+            cors=None,  # type: Optional[List[CorsRule]]
+            target_version=None,  # type: Optional[str]
+            delete_retention_policy=None,  # type: Optional[RetentionPolicy]
+            static_website=None,  # type: Optional[StaticWebsite]
+            **kwargs
+        ):
+        # type: (...) -> None
+        """Sets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        If an element (e.g. analytics_logging) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        :param analytics_logging:
+            Groups the Azure Analytics Logging settings.
+        :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging
+        :param hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates for blobs.
+        :type hour_metrics: ~azure.storage.blob.Metrics
+        :param minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute for blobs.
+        :type minute_metrics: ~azure.storage.blob.Metrics
+        :param cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.blob.CorsRule]
+        :param str target_version:
+            Indicates the default version to use for requests if an incoming
+            request's version is not specified.
+        :param delete_retention_policy:
+            The delete retention policy specifies whether to retain deleted blobs.
+            It also specifies the number of days and versions of blob to keep.
+        :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
+        :param static_website:
+            Specifies whether the static website feature is enabled,
+            and if yes, indicates the index document and 404 error document to use.
+        :type static_website: ~azure.storage.blob.StaticWebsite
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START set_blob_service_properties]
+                :end-before: [END set_blob_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Setting service properties for the blob service.
+        """
+        if all(parameter is None for parameter in [
+                analytics_logging, hour_metrics, minute_metrics, cors,
+                target_version, delete_retention_policy, static_website]):
+            raise ValueError("set_service_properties should be called with at least one parameter")
+
+        props = StorageServiceProperties(
+            logging=analytics_logging,
+            hour_metrics=hour_metrics,
+            minute_metrics=minute_metrics,
+            cors=cors,
+            default_service_version=target_version,
+            delete_retention_policy=delete_retention_policy,
+            static_website=static_website
+        )
+        timeout = kwargs.pop('timeout', None)
+        try:
+            self._client.service.set_properties(props, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_containers(
+            self, name_starts_with=None,  # type: Optional[str]
+            include_metadata=False,  # type: Optional[bool]
+            **kwargs
+        ):
+        # type: (...) -> ItemPaged[ContainerProperties]
+        """Returns a generator to list the containers under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all containers have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only containers whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that container metadata be returned in the response.
+            The default value is `False`.
+        :keyword bool include_deleted:
+            Specifies that deleted containers be returned in the response. This is for accounts
+            with container restore enabled. The default value is `False`.
+            .. versionadded:: 12.4.0
+        :keyword int results_per_page:
+            The maximum number of container names to retrieve per API
+            call. If the request does not specify, the server will return up to 5,000 items.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) of ContainerProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_list_containers]
+                :end-before: [END bsc_list_containers]
+                :language: python
+                :dedent: 12
+                :caption: Listing the containers in the blob service.
+        """
+        include = ['metadata'] if include_metadata else []
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_containers_segment,
+            prefix=name_starts_with,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            page_iterator_class=ContainerPropertiesPaged
+        )
+
+    @distributed_trace
+    def find_blobs_by_tags(self, filter_expression, **kwargs):
+        # type: (str, **Any) -> ItemPaged[FilteredBlob]
+        """The Filter Blobs operation enables callers to list blobs across all
+        containers whose tags match a given search expression. Filter blobs
+        searches across all containers within a storage account but can be
+        scoped within the expression to a single container.
+
+        :param str filter_expression:
+            The expression to find blobs whose tags match the specified condition.
+            eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
+            To specify a container, eg. "@container='containerName' and \"Name\"='C'"
+        :keyword int results_per_page:
+            The maximum number of results per page when paginating.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) response of FilteredBlob.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob]
+        """
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.service.filter_blobs,
+            where=filter_expression,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=FilteredBlobPaged)
+
+    @distributed_trace
+    def create_container(
+            self, name,  # type: str
+            metadata=None,  # type: Optional[Dict[str, str]]
+            public_access=None,  # type: Optional[Union[PublicAccess, str]]
+            **kwargs
+        ):
+        # type: (...) -> ContainerClient
+        """Creates a new container under the specified account.
+
+        If the container with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created container.
+
+        :param str name: The name of the container to create.
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            container as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            Possible values include: 'container', 'blob'.
+        :type public_access: str or ~azure.storage.blob.PublicAccess
+        :keyword container_encryption_scope:
+            Specifies the default encryption scope to set on the container and use for
+            all future writes.
+
+            .. versionadded:: 12.2.0
+
+        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_create_container]
+                :end-before: [END bsc_create_container]
+                :language: python
+                :dedent: 12
+                :caption: Creating a container in the blob service.
+        """
+        container = self.get_container_client(name)
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        container.create_container(
+            metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
+        return container
+
+    @distributed_trace
+    def delete_container(
+            self, container,  # type: Union[ContainerProperties, str]
+            lease=None,  # type: Optional[Union[BlobLeaseClient, str]]
+            **kwargs
+        ):
+        # type: (...) -> None
+        """Marks the specified container for deletion.
+
+        The container and any blobs contained within it are later deleted during garbage collection.
+        If the container is not found, a ResourceNotFoundError will be raised.
+
+        :param container:
+            The container to delete. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :param lease:
+            If specified, delete_container only succeeds if the
+            container's lease is active and matches this ID.
+            Required if the container has an active lease.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_delete_container]
+                :end-before: [END bsc_delete_container]
+                :language: python
+                :dedent: 12
+                :caption: Deleting a container in the blob service.
+        """
+        container = self.get_container_client(container)  # type: ignore
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        container.delete_container(  # type: ignore
+            lease=lease,
+            timeout=timeout,
+            **kwargs)
+
+    @distributed_trace
+    def _rename_container(self, name, new_name, **kwargs):
+        # type: (str, str, **Any) -> ContainerClient
+        """Renames a container.
+
+        Operation is successful only if the source container exists.
+
+        :param str name:
+            The name of the container to rename.
+        :param str new_name:
+            The new container name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source container.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        renamed_container = self.get_container_client(new_name)
+        lease = kwargs.pop('lease', None)
+        try:
+            kwargs['source_lease_id'] = lease.id  # type: str
+        except AttributeError:
+            kwargs['source_lease_id'] = lease
+        try:
+            renamed_container._client.container.rename(name, **kwargs)  # pylint: disable = protected-access
+            return renamed_container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs):
+        # type: (str, str, **Any) -> ContainerClient
+        """Restores a soft-deleted container.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_container_name:
+            Specifies the name of the deleted container to restore.
+        :param str deleted_container_version:
+            Specifies the version of the deleted container to restore.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        new_name = kwargs.pop('new_name', None)
+        if new_name:
+            warnings.warn("`new_name` is no longer supported.", DeprecationWarning)
+        container = self.get_container_client(new_name or deleted_container_name)
+        try:
+            container._client.container.restore(deleted_container_name=deleted_container_name,  # pylint: disable = protected-access
+                                                deleted_container_version=deleted_container_version,
+                                                timeout=kwargs.pop('timeout', None), **kwargs)
+            return container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def get_container_client(self, container):
+        # type: (Union[ContainerProperties, str]) -> ContainerClient
+        """Get a client to interact with the specified container.
+
+        The container need not already exist.
+
+        :param container:
+            The container. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :returns: A ContainerClient.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_get_container_client]
+                :end-before: [END bsc_get_container_client]
+                :language: python
+                :dedent: 8
+                :caption: Getting the container client to interact with a specific container.
+        """
+        try:
+            container_name = container.name
+        except AttributeError:
+            container_name = container
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        return ContainerClient(
+            self.url, container_name=container_name,
+            credential=self.credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
+
+    def get_blob_client(
+            self, container,  # type: Union[ContainerProperties, str]
+            blob,  # type: Union[BlobProperties, str]
+            snapshot=None  # type: Optional[Union[Dict[str, Any], str]]
+        ):
+        # type: (...) -> BlobClient
+        """Get a client to interact with the specified blob.
+
+        The blob need not already exist.
+
+        :param container:
+            The container that the blob is in. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :param blob:
+            The blob with which to interact. This can either be the name of the blob,
+            or an instance of BlobProperties.
+        :type blob: str or ~azure.storage.blob.BlobProperties
+        :param snapshot:
+            The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
+            or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`.
+        :type snapshot: str or dict(str, Any)
+        :returns: A BlobClient.
+        :rtype: ~azure.storage.blob.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_get_blob_client]
+                :end-before: [END bsc_get_blob_client]
+                :language: python
+                :dedent: 12
+                :caption: Getting the blob client to interact with a specific blob.
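+
+        As a rough sketch (the container and blob names here are hypothetical),
+        the returned client can be used directly for blob operations:
+
+        .. code-block:: python
+
+            blob_client = blob_service_client.get_blob_client("mycontainer", "myblob.txt")
+            content = blob_client.download_blob().readall()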
+ """ + try: + container_name = container.name + except AttributeError: + container_name = container + try: + blob_name = blob.name + except AttributeError: + blob_name = blob + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( # type: ignore + self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_container_client.py b/azure/multiapi/storagev2/blob/v2020_10_02/_container_client.py new file mode 100644 index 0000000..59f17d6 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_container_client.py @@ -0,0 +1,1551 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, AnyStr, Dict, List, Tuple, IO, Iterator, + TYPE_CHECKING +) + + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six + +from azure.core import MatchConditions +from azure.core.exceptions import HttpResponseError, ResourceNotFoundError +from azure.core.paging import ItemPaged +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import HttpRequest + +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, serialize_iso +from ._shared.response_handlers import ( + process_storage_error, + return_response_headers, + return_headers_and_deserialized) +from ._generated import AzureBlobStorage +from ._generated.models import SignedIdentifier +from ._deserialize import deserialize_container_properties +from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions +from ._models import ( # pylint: disable=unused-import + ContainerProperties, + BlobProperties, + BlobType) +from ._list_blobs_helper import BlobPrefix, BlobPropertiesPaged +from ._lease import BlobLeaseClient +from ._blob_client import BlobClient + +if TYPE_CHECKING: + from azure.core.pipeline.transport import HttpTransport, HttpResponse # pylint: disable=ungrouped-imports + from azure.core.pipeline.policies import HTTPPolicy # pylint: disable=ungrouped-imports + from datetime import datetime + from ._models import ( # pylint: disable=unused-import + PublicAccess, + AccessPolicy, + ContentSettings, + StandardBlobTier, + PremiumPageBlobTier) + + +def _get_blob_name(blob): + """Return the blob name. 
+
+    :param blob: A blob string or BlobProperties
+    :rtype: str
+    """
+    try:
+        return blob.get('name')
+    except AttributeError:
+        return blob
+
+
+class ContainerClient(StorageAccountHostsMixin):  # pylint: disable=too-many-public-methods
+    """A client to interact with a specific container, although that container
+    may not yet exist.
+
+    For operations relating to a specific blob within this container, a blob client can be
+    retrieved using the :func:`~get_blob_client` function.
+
+    For more optional configuration, please click
+    `here `_.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the container,
+        use the :func:`from_container_url` classmethod.
+    :param container_name:
+        The name of the container for the blob.
+    :type container_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential from azure.core.credentials, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is '2019-07-07'.
+        Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+        the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_containers.py
+            :start-after: [START create_container_client_from_service]
+            :end-before: [END create_container_client_from_service]
+            :language: python
+            :dedent: 8
+            :caption: Get a ContainerClient from an existing BlobServiceClient.
+
+        .. literalinclude:: ../samples/blob_samples_containers.py
+            :start-after: [START create_container_client_sasurl]
+            :end-before: [END create_container_client_sasurl]
+            :language: python
+            :dedent: 8
+            :caption: Creating the container client directly.
+    """
+    def __init__(
+            self, account_url,  # type: str
+            container_name,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> None
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError:
+            raise ValueError("Container URL must be a string.")
+        parsed_url = urlparse(account_url.rstrip('/'))
+        if not container_name:
+            raise ValueError("Please specify a container name.")
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(account_url))
+
+        _, sas_token = parse_query(parsed_url.query)
+        self.container_name = container_name
+        # This parameter is used for the hierarchy traversal. Give precedence to credential.
+        self._raw_credential = credential if credential else sas_token
+        self._query_str, credential = self._format_query_string(sas_token, credential)
+        super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._client = AzureBlobStorage(self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # pylint: disable=protected-access
+
+    def _format_url(self, hostname):
+        container_name = self.container_name
+        if isinstance(container_name, six.text_type):
+            container_name = container_name.encode('UTF-8')
+        return "{}://{}/{}{}".format(
+            self.scheme,
+            hostname,
+            quote(container_name),
+            self._query_str)
+
+    @classmethod
+    def from_container_url(cls, container_url, credential=None, **kwargs):
+        # type: (str, Optional[Any], Any) -> ContainerClient
+        """Create ContainerClient from a container url.
+
+        :param str container_url:
+            The full endpoint URL to the Container, including SAS token if used. This could be
+            either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
+        :type container_url: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of a AzureSasCredential from azure.core.credentials, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        :returns: A container client.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        try:
+            if not container_url.lower().startswith('http'):
+                container_url = "https://" + container_url
+        except AttributeError:
+            raise ValueError("Container URL must be a string.")
+        parsed_url = urlparse(container_url.rstrip('/'))
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(container_url))
+
+        container_path = parsed_url.path.lstrip('/').split('/')
+        account_path = ""
+        if len(container_path) > 1:
+            account_path = "/" + "/".join(container_path[:-1])
+        account_url = "{}://{}{}?{}".format(
+            parsed_url.scheme,
+            parsed_url.netloc.rstrip('/'),
+            account_path,
+            parsed_url.query)
+        container_name = unquote(container_path[-1])
+        if not container_name:
+            raise ValueError("Invalid URL. Please provide a URL with a valid container name")
+        return cls(account_url, container_name=container_name, credential=credential, **kwargs)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            container_name,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):  # type: (...) -> ContainerClient
+        """Create ContainerClient from a Connection String.
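+
+        For orientation, a minimal sketch of typical usage (the connection string
+        and container name are placeholders, not values from this repository):
+
+        .. code-block:: python
+
+            container = ContainerClient.from_connection_string(
+                conn_str, container_name="mycontainer")
+            if not container.exists():
+                container.create_container()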
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param container_name:
+            The container name for the blob.
+        :type container_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of a AzureSasCredential from azure.core.credentials, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :returns: A container client.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string_container]
+                :end-before: [END auth_from_connection_string_container]
+                :language: python
+                :dedent: 8
+                :caption: Creating the ContainerClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, container_name=container_name, credential=credential, **kwargs)
+
+    @distributed_trace
+    def create_container(self, metadata=None, public_access=None, **kwargs):
+        # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None
+        """
+        Creates a new container under the specified account. If the container
+        with the same name already exists, the operation fails.
+
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            container as metadata. Example: {'Category':'test'}
+        :type metadata: dict[str, str]
+        :param ~azure.storage.blob.PublicAccess public_access:
+            Possible values include: 'container', 'blob'.
+        :keyword container_encryption_scope:
+            Specifies the default encryption scope to set on the container and use for
+            all future writes.
+
+            .. versionadded:: 12.2.0
+
+        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START create_container]
+                :end-before: [END create_container]
+                :language: python
+                :dedent: 12
+                :caption: Creating a container to store blobs.
+        """
+        headers = kwargs.pop('headers', {})
+        timeout = kwargs.pop('timeout', None)
+        headers.update(add_metadata_headers(metadata))  # type: ignore
+        container_cpk_scope_info = get_container_cpk_scope_info(kwargs)
+        try:
+            return self._client.container.create(  # type: ignore
+                timeout=timeout,
+                access=public_access,
+                container_cpk_scope_info=container_cpk_scope_info,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def _rename_container(self, new_name, **kwargs):
+        # type: (str, **Any) -> ContainerClient
+        """Renames a container.
+
+        Operation is successful only if the source container exists.
+
+        :param str new_name:
+            The new container name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source container.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        lease = kwargs.pop('lease', None)
+        try:
+            kwargs['source_lease_id'] = lease.id  # type: str
+        except AttributeError:
+            kwargs['source_lease_id'] = lease
+        try:
+            renamed_container = ContainerClient(
+                "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name,
+                credential=self.credential, api_version=self.api_version, _configuration=self._config,
+                _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+                require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+                key_resolver_function=self.key_resolver_function)
+            renamed_container._client.container.rename(self.container_name, **kwargs)  # pylint: disable = protected-access
+            return renamed_container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def delete_container(
+            self, **kwargs):
+        # type: (Any) -> None
+        """
+        Marks the specified container for deletion. The container and any blobs
+        contained within it are later deleted during garbage collection.
+
+        :keyword lease:
+            If specified, delete_container only succeeds if the
+            container's lease is active and matches this ID.
+            Required if the container has an active lease.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START delete_container]
+                :end-before: [END delete_container]
+                :language: python
+                :dedent: 12
+                :caption: Delete a container.
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        mod_conditions = get_modify_conditions(kwargs)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            self._client.container.delete(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def acquire_lease(
+            self, lease_duration=-1,  # type: int
+            lease_id=None,  # type: Optional[str]
+            **kwargs):
+        # type: (...) -> BlobLeaseClient
+        """
+        Requests a new lease. If the container does not have an active lease,
+        the Blob service creates a lease on the container and returns a new
+        lease ID.
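+
+        As a sketch only (the duration and metadata values are illustrative),
+        a lease is typically acquired, passed to a guarded operation, and released:
+
+        .. code-block:: python
+
+            lease = container_client.acquire_lease(lease_duration=15)
+            try:
+                container_client.set_container_metadata({"purpose": "demo"}, lease=lease)
+            finally:
+                lease.release()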
+ + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A BlobLeaseClient object, that can be run in a context manager. + :rtype: ~azure.storage.blob.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START acquire_lease_on_container] + :end-before: [END acquire_lease_on_container] + :language: python + :dedent: 8 + :caption: Acquiring a lease on the container. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) + return lease + + @distributed_trace + def get_account_information(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + """ + try: + return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_container_properties(self, **kwargs): + # type: (Any) -> ContainerProperties + """Returns all user-defined metadata and system properties for the specified + container. The data returned does not include the container's list of blobs. + + :keyword lease: + If specified, get_container_properties only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Properties for the specified container within a container object. + :rtype: ~azure.storage.blob.ContainerProperties + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START get_container_properties]
+                :end-before: [END get_container_properties]
+                :language: python
+                :dedent: 12
+                :caption: Getting properties on the container.
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response = self._client.container.get_properties(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                cls=deserialize_container_properties,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        response.name = self.container_name
+        return response  # type: ignore
+
+    @distributed_trace
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if the container exists and False otherwise.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: True if the container exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            self._client.container.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace
+    def set_container_metadata(  # type: ignore
+            self, metadata=None,  # type: Optional[Dict[str, str]]
+            **kwargs
+    ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        container. Each call to this operation replaces all existing metadata
+        attached to the container. To remove all metadata from the container,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the container as
+            metadata. Example: {'category': 'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_container_metadata only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Container-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START set_container_metadata]
+                :end-before: [END set_container_metadata]
+                :language: python
+                :dedent: 12
+                :caption: Setting metadata on the container.
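+
+        A minimal inline sketch (illustrative; assumes an existing ``ContainerClient``
+        named ``container_client``)::
+
+            metadata = {'category': 'test'}
+            container_client.set_container_metadata(metadata=metadata)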
+ """ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + return self._client.container.set_metadata( # type: ignore + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs + # type: (...) -> BlobServiceClient + """Get a client to interact with the container's parent service account. + + Defaults to current container's credentials. + + :returns: A BlobServiceClient. + :rtype: ~azure.storage.blob.BlobServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START get_blob_service_client_from_container_client] + :end-before: [END get_blob_service_client_from_container_client] + :language: python + :dedent: 8 + :caption: Get blob service client from container object. + """ + from ._blob_service_client import BlobServiceClient + if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + else: + _pipeline = self._pipeline # pylint: disable = protected-access + return BlobServiceClient( + "{}://{}".format(self.scheme, self.primary_hostname), + credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, + _pipeline=_pipeline) + + @distributed_trace + def get_container_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the specified container. + The permissions indicate whether container data may be accessed publicly. + + :keyword lease: + If specified, get_container_access_policy only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START get_container_access_policy] + :end-before: [END get_container_access_policy] + :language: python + :dedent: 12 + :caption: Getting the access policy on the container. 
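+
+        A minimal inline sketch of reading the result (illustrative; assumes an
+        existing ``ContainerClient`` named ``container_client``)::
+
+            policy = container_client.get_container_access_policy()
+            print(policy['public_access'])        # e.g. 'container', 'blob' or None
+            print(policy['signed_identifiers'])   # list of stored access policies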
+ """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = self._client.container.get_access_policy( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=return_headers_and_deserialized, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return { + 'public_access': response.get('blob_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace + def set_container_access_policy( + self, signed_identifiers, # type: Dict[str, AccessPolicy] + public_access=None, # type: Optional[Union[str, PublicAccess]] + **kwargs + ): # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the permissions for the specified container or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether blobs in a container may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the container. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified date/time. + :keyword ~datetime.datetime if_unmodified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Container-updated property dict (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START set_container_access_policy] + :end-before: [END set_container_access_policy] + :language: python + :dedent: 12 + :caption: Setting access policy on the container. + """ + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. 
The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore + signed_identifiers = identifiers # type: ignore + lease = kwargs.pop('lease', None) + mod_conditions = get_modify_conditions(kwargs) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + return self._client.container.set_access_policy( + container_acl=signed_identifiers or None, + timeout=timeout, + access=public_access, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_blobs(self, name_starts_with=None, include=None, **kwargs): + # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> ItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] or str include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions', + 'tags', 'versions', 'immutabilitypolicy', 'legalhold'. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START list_blobs_in_container] + :end-before: [END list_blobs_in_container] + :language: python + :dedent: 8 + :caption: List the blobs in the container. + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_flat_segment, + include=include, + timeout=timeout, + **kwargs) + return ItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=BlobPropertiesPaged) + + @distributed_trace + def walk_blobs( + self, name_starts_with=None, # type: Optional[str] + include=None, # type: Optional[Any] + delimiter="/", # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> ItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. This operation will list blobs in accordance with a hierarchy, + as delimited by the specified delimiter character. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
+        :param str delimiter:
+            When the request includes this parameter, the operation returns a BlobPrefix
+            element in the response body that acts as a placeholder for all blobs whose
+            names begin with the same substring up to the appearance of the delimiter
+            character. The delimiter may be a single character or a string.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) response of BlobProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties]
+        """
+        if include and not isinstance(include, list):
+            include = [include]
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.container.list_blob_hierarchy_segment,
+            delimiter=delimiter,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return BlobPrefix(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            delimiter=delimiter)
+
+    @distributed_trace
+    def upload_blob(
+            self, name,  # type: Union[str, BlobProperties]
+            data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
+            blob_type=BlobType.BlockBlob,  # type: Union[str, BlobType]
+            length=None,  # type: Optional[int]
+            metadata=None,  # type: Optional[Dict[str, str]]
+            **kwargs
+    ):
+        # type: (...) -> BlobClient
+        """Creates a new blob from a data source with automatic chunking.
+
+        :param name: The blob with which to interact. If specified, this value will override
+            a blob value specified in the blob URL.
+        :type name: str or ~azure.storage.blob.BlobProperties
+        :param data: The blob data to upload.
+        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError. The exception to the above is with Append
+            blob types: if set to False and the data already exists, an error will not be raised
+            and the data will be appended to the existing blob. If overwrite is set to True, the
+            existing append blob will be deleted and a new one created. Defaults to False.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used, because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the container has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :returns: A BlobClient to interact with the newly uploaded blob. + :rtype: ~azure.storage.blob.BlobClient + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START upload_blob_to_container]
+                :end-before: [END upload_blob_to_container]
+                :language: python
+                :dedent: 8
+                :caption: Upload blob to the container.
+        """
+        blob = self.get_blob_client(name)
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        blob.upload_blob(
+            data,
+            blob_type=blob_type,
+            length=length,
+            metadata=metadata,
+            timeout=timeout,
+            encoding=encoding,
+            **kwargs
+        )
+        return blob
+
+    @distributed_trace
+    def delete_blob(
+            self, blob,  # type: Union[str, BlobProperties]
+            delete_snapshots=None,  # type: Optional[str]
+            **kwargs
+    ):
+        # type: (...) -> None
+        """Marks the specified blob or snapshot for deletion.
+
+        The blob is later deleted during garbage collection.
+        Note that in order to delete a blob, you must delete all of its
+        snapshots. You can delete both at the same time with the delete_blob
+        operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blob
+        or snapshot and retains it for the specified number of days.
+        After the specified number of days, the blob's data is removed from the service during garbage
+        collection. A soft-deleted blob or snapshot is accessible through :func:`list_blobs()` specifying
+        the `include=["deleted"]` option, and can be restored using :func:`~BlobClient.undelete()`.
+
+        :param blob: The blob with which to interact. If specified, this value will override
+            a blob value specified in the blob URL.
+        :type blob: str or ~azure.storage.blob.BlobProperties
+        :param str delete_snapshots:
+            Required if the blob has associated snapshots. Values include:
+             - "only": Deletes only the blob's snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to delete.
+
+            .. versionadded:: 12.4.0
+                This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. 
versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + blob_client = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + blob_client.delete_blob( # type: ignore + delete_snapshots=delete_snapshots, + timeout=timeout, + **kwargs) + + @distributed_trace + def download_blob(self, blob, offset=None, length=None, **kwargs): + # type: (Union[str, BlobProperties], Optional[int], Optional[int], **Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. 
versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.blob.StorageStreamDownloader + """ + blob_client = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + return blob_client.download_blob(offset=offset, length=length, **kwargs) + + def _generate_delete_blobs_subrequest_options( + self, snapshot=None, + delete_snapshots=None, + lease_access_conditions=None, + modified_access_conditions=None, + **kwargs + ): + """This code is a copy from _generated. + + Once Autorest is able to provide request preparation this code should be removed. + """ + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct parameters + timeout = kwargs.pop('timeout', None) + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access + if timeout is not None: + query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access + + # Construct headers + header_parameters = {} + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access + "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access + "lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access + "if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access + "if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access + "if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = 
self._client._serialize.header(  # pylint: disable=protected-access
+                "if_none_match", if_none_match, 'str')
+        if if_tags is not None:
+            header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str')  # pylint: disable=protected-access
+
+        return query_parameters, header_parameters
+
+    def _generate_delete_blobs_options(self,
+                                       *blobs,  # type: List[Union[str, BlobProperties, dict]]
+                                       **kwargs
+                                       ):
+        timeout = kwargs.pop('timeout', None)
+        raise_on_any_failure = kwargs.pop('raise_on_any_failure', True)
+        delete_snapshots = kwargs.pop('delete_snapshots', None)
+        if_modified_since = kwargs.pop('if_modified_since', None)
+        if_unmodified_since = kwargs.pop('if_unmodified_since', None)
+        if_tags_match_condition = kwargs.pop('if_tags_match_condition', None)
+        kwargs.update({'raise_on_any_failure': raise_on_any_failure,
+                       'sas': self._query_str.replace('?', '&'),
+                       'timeout': '&timeout=' + str(timeout) if timeout else "",
+                       'path': self.container_name,
+                       'restype': 'restype=container&'
+                       })
+
+        reqs = []
+        for blob in blobs:
+            blob_name = _get_blob_name(blob)
+            container_name = self.container_name
+
+            try:
+                options = BlobClient._generic_delete_blob_options(  # pylint: disable=protected-access
+                    snapshot=blob.get('snapshot'),
+                    delete_snapshots=delete_snapshots or blob.get('delete_snapshots'),
+                    lease=blob.get('lease_id'),
+                    if_modified_since=if_modified_since or blob.get('if_modified_since'),
+                    if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'),
+                    etag=blob.get('etag'),
+                    if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'),
+                    match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag')
+                    else None,
+                    timeout=blob.get('timeout'),
+                )
+            except AttributeError:
+                options = BlobClient._generic_delete_blob_options(  # pylint: disable=protected-access
+                    delete_snapshots=delete_snapshots,
+                    if_modified_since=if_modified_since,
+                    if_unmodified_since=if_unmodified_since,
+                    if_tags_match_condition=if_tags_match_condition
+                )
+
+            query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options)
+
+            req = HttpRequest(
+                "DELETE",
+                "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str),
+                headers=header_parameters
+            )
+            req.format_parameters(query_parameters)
+            reqs.append(req)
+
+        return reqs, kwargs
+
+    @distributed_trace
+    def delete_blobs(self, *blobs, **kwargs):
+        # type: (...) -> Iterator[HttpResponse]
+        """Marks the specified blobs or snapshots for deletion.
+
+        The blobs are later deleted during garbage collection.
+        Note that in order to delete blobs, you must delete all of their
+        snapshots. You can delete both at the same time with the delete_blobs operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blobs
+        or snapshots and retains them for the specified number of days.
+        After the specified number of days, the blobs' data is removed from the service during garbage
+        collection. Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying
+        the `include=["deleted"]` option, and can be restored using :func:`~BlobClient.undelete()`.
+
+        :param blobs:
+            The blobs to delete. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When a blob is passed as a dict, the supported keys and value rules are:
+
+                blob name:
+                    key: 'name', value type: str
+                snapshot you want to delete:
+                    key: 'snapshot', value type: str
+                whether to delete snapshots when deleting the blob:
+                    key: 'delete_snapshots', value: 'include' or 'only'
+                modified-since and unmodified-since conditions:
+                    key: 'if_modified_since', 'if_unmodified_since', value type: datetime
+                etag:
+                    key: 'etag', value type: str
+                match condition to use upon the etag:
+                    key: 'match_condition', value type: MatchConditions
+                tags match condition:
+                    key: 'if_tags_match_condition', value type: str
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+        :keyword str delete_snapshots:
+            Required if a blob has associated snapshots. Values include:
+             - "only": Deletes only the blob's snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword bool raise_on_any_failure:
+            Defaults to True. When set to True, an exception is raised if any single
+            operation in the batch fails.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: An iterator of responses, one for each blob in order
+        :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common.py
+                :start-after: [START delete_multiple_blobs]
+                :end-before: [END delete_multiple_blobs]
+                :language: python
+                :dedent: 8
+                :caption: Deleting multiple blobs.
+        """
+        if len(blobs) == 0:
+            return iter(list())
+
+        reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs)
+
+        return self._batch_send(*reqs, **options)
+
+    def _generate_set_tiers_subrequest_options(
+            self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None,
+            **kwargs
+    ):
+        """This code is a copy from _generated.
+
+        Once Autorest is able to provide request preparation this code should be removed.
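+
+        As an illustrative sketch (hypothetical values), a call such as
+        ``_generate_set_tiers_subrequest_options(tier='Cool', timeout=30)`` produces
+        ``query_parameters == {'timeout': '30', 'comp': 'tier'}`` and
+        ``header_parameters == {'x-ms-access-tier': 'Cool'}`` for the batch subrequest.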
+ """ + if not tier: + raise ValueError("A blob tier must be specified") + if snapshot and version_id: + raise ValueError("Snapshot and version_id cannot be set at the same time") + if_tags = kwargs.pop('if_tags', None) + + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "tier" + timeout = kwargs.pop('timeout', None) + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access + if version_id is not None: + query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access + if timeout is not None: + query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access + query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call + + # Construct headers + header_parameters = {} + header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access + "rehydrate_priority", rehydrate_priority, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access + + return query_parameters, header_parameters + + def _generate_set_tiers_options(self, + blob_tier, # type: Optional[Union[str, StandardBlobTier, PremiumPageBlobTier]] + *blobs, # type: List[Union[str, BlobProperties, dict]] + **kwargs + ): + timeout = kwargs.pop('timeout', None) + raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) + rehydrate_priority = kwargs.pop('rehydrate_priority', None) + if_tags = kwargs.pop('if_tags_match_condition', None) + kwargs.update({'raise_on_any_failure': raise_on_any_failure, + 'sas': self._query_str.replace('?', '&'), + 'timeout': '&timeout=' + str(timeout) if timeout else "", + 'path': self.container_name, + 'restype': 'restype=container&' + }) + + reqs = [] + for blob in blobs: + blob_name = _get_blob_name(blob) + container_name = self.container_name + + try: + tier = blob_tier or blob.get('blob_tier') + query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( + tier=tier, + snapshot=blob.get('snapshot'), + version_id=blob.get('version_id'), + rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'), + lease_access_conditions=blob.get('lease_id'), + if_tags=if_tags or blob.get('if_tags_match_condition'), + timeout=timeout or blob.get('timeout') + ) + except AttributeError: + query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( + blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags) + + req = HttpRequest( + "PUT", + "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), + headers=header_parameters + ) + req.format_parameters(query_parameters) + reqs.append(req) + + return reqs, kwargs + + @distributed_trace + def set_standard_blob_tier_blobs( + self, + 
standard_blob_tier,  # type: Optional[Union[str, StandardBlobTier]]
+            *blobs,  # type: List[Union[str, BlobProperties, dict]]
+            **kwargs
+    ):
+        # type: (...) -> Iterator[HttpResponse]
+        """This operation sets the tier on block blobs.
+
+        A block blob's tier determines Hot/Cool/Archive storage type.
+        This operation does not update the blob's ETag.
+
+        :param standard_blob_tier:
+            Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool',
+            'Archive'. The hot tier is optimized for storing data that is accessed
+            frequently. The cool storage tier is optimized for storing data that
+            is infrequently accessed and stored for at least a month. The archive
+            tier is optimized for storing data that is rarely accessed and stored
+            for at least six months with flexible latency requirements.
+
+            .. note::
+                If you want to set a different tier on each blob, set this positional
+                parameter to None. The blob tier on each BlobProperties will then be used.
+
+        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+        :param blobs:
+            The blobs with which to interact. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When a blob is passed as a dict, the supported keys and value rules are:
+
+                blob name:
+                    key: 'name', value type: str
+                standard blob tier:
+                    key: 'blob_tier', value type: StandardBlobTier
+                rehydrate priority:
+                    key: 'rehydrate_priority', value type: RehydratePriority
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                snapshot:
+                    key: "snapshot", value type: str
+                version id:
+                    key: "version_id", value type: str
+                tags match condition:
+                    key: 'if_tags_match_condition', value type: str
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :keyword bool raise_on_any_failure:
+            Defaults to True. When set to True, an exception is raised if any single
+            operation in the batch fails.
+        :return: An iterator of responses, one for each blob in order
+        :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
+        """
+        reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs)
+
+        return self._batch_send(*reqs, **options)
+
+    @distributed_trace
+    def set_premium_page_blob_tier_blobs(
+            self,
+            premium_page_blob_tier,  # type: Optional[Union[str, PremiumPageBlobTier]]
+            *blobs,  # type: List[Union[str, BlobProperties, dict]]
+            **kwargs
+    ):
+        # type: (...) -> Iterator[HttpResponse]
+        """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts.
+
+        :param premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+
+            .. note::
+                If you want to set a different tier on each blob, set this positional
+                parameter to None. The blob tier on each BlobProperties will then be used.
+ + :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier + :param blobs: + The blobs with which to interact. This can be a single blob, or multiple values can + be supplied, where each value is either the name of the blob (str) or BlobProperties. + + .. note:: + When the blob type is dict, here's a list of keys, value rules. + + blob name: + key: 'name', value type: str + premium blob tier: + key: 'blob_tier', value type: PremiumPageBlobTier + lease: + key: 'lease_id', value type: Union[str, LeaseClient] + timeout for subrequest: + key: 'timeout', value type: int + + :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties] + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword bool raise_on_any_failure: + This is a boolean param which defaults to True. When this is set, an exception + is raised even if there is a single operation failure. + :return: An iterator of responses, one for each blob in order + :rtype: iterator[~azure.core.pipeline.transport.HttpResponse] + """ + reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) + + return self._batch_send(*reqs, **options) + + def get_blob_client( + self, blob, # type: Union[str, BlobProperties] + snapshot=None # type: str + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param blob: + The blob with which to interact. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`~BlobClient.create_snapshot()`. + :returns: A BlobClient. + :rtype: ~azure.storage.blob.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START get_blob_client] + :end-before: [END get_blob_client] + :language: python + :dedent: 8 + :caption: Get the blob client. + """ + blob_name = _get_blob_name(blob) + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( + self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_deserialize.py b/azure/multiapi/storagev2/blob/v2020_10_02/_deserialize.py new file mode 100644 index 0000000..c724753 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_deserialize.py @@ -0,0 +1,169 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+from typing import (  # pylint: disable=unused-import
+    Tuple, Dict, List,
+    TYPE_CHECKING
+)
+
+from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties, ImmutabilityPolicy
+from ._shared.models import get_enum_value
+
+from ._shared.response_handlers import deserialize_metadata
+from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \
+    StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule
+
+if TYPE_CHECKING:
+    from ._generated.models import PageList
+
+
+def deserialize_pipeline_response_into_cls(cls_method, response, obj, headers):
+    try:
+        deserialized_response = response.http_response
+    except AttributeError:
+        deserialized_response = response
+    return cls_method(deserialized_response, obj, headers)
+
+
+def deserialize_blob_properties(response, obj, headers):
+    blob_properties = BlobProperties(
+        metadata=deserialize_metadata(response, obj, headers),
+        object_replication_source_properties=deserialize_ors_policies(response.http_response.headers),
+        **headers
+    )
+    if 'Content-Range' in headers:
+        if 'x-ms-blob-content-md5' in headers:
+            blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5']
+        else:
+            blob_properties.content_settings.content_md5 = None
+    return blob_properties
+
+
+def deserialize_ors_policies(policy_dictionary):
+
+    if policy_dictionary is None:
+        return None
+    # For source blobs (blobs that have policy ids and rule ids applied to them),
+    # the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
+    # The value of this header is the status of the replication.
+    or_policy_status_headers = {key: val for key, val in policy_dictionary.items()
+                                if 'or-' in key and key != 'x-ms-or-policy-id'}
+
+    parsed_result = {}
+
+    for key, val in or_policy_status_headers.items():
+        # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule
+        policy_and_rule_ids = key.split('or-')[1].split('_')
+        policy_id = policy_and_rule_ids[0]
+        rule_id = policy_and_rule_ids[1]
+
+        # If we are seeing this policy for the first time, create a new list to store rule_id -> result
+        parsed_result[policy_id] = parsed_result.get(policy_id) or list()
+        parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val))
+
+    result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()]
+
+    return result_list
+
+
+def deserialize_blob_stream(response, obj, headers):
+    blob_properties = deserialize_blob_properties(response, obj, headers)
+    obj.properties = blob_properties
+    return response.http_response.location_mode, obj
+
+
+def deserialize_container_properties(response, obj, headers):
+    metadata = deserialize_metadata(response, obj, headers)
+    container_properties = ContainerProperties(
+        metadata=metadata,
+        **headers
+    )
+    return container_properties
+
+
+def get_page_ranges_result(ranges):
+    # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+    page_range = []  # type: ignore
+    clear_range = []  # type: List
+    if ranges.page_range:
+        page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range]  # type: ignore
+    if ranges.clear_range:
+        clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range]
+    return page_range, clear_range  # type: ignore
+
+
+def service_stats_deserialize(generated):
+    """Deserialize a ServiceStats object into a dict.
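+
+    A sketch of the returned shape (field values are illustrative)::
+
+        {'geo_replication': {'status': 'live', 'last_sync_time': <datetime>}}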
+    """
+    return {
+        'geo_replication': {
+            'status': generated.geo_replication.status,
+            'last_sync_time': generated.geo_replication.last_sync_time,
+        }
+    }
+
+
+def service_properties_deserialize(generated):
+    """Deserialize a ServiceProperties object into a dict.
+    """
+    return {
+        'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging),  # pylint: disable=protected-access
+        'hour_metrics': Metrics._from_generated(generated.hour_metrics),  # pylint: disable=protected-access
+        'minute_metrics': Metrics._from_generated(generated.minute_metrics),  # pylint: disable=protected-access
+        'cors': [CorsRule._from_generated(cors) for cors in generated.cors],  # pylint: disable=protected-access
+        'target_version': generated.default_service_version,
+        'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy),  # pylint: disable=protected-access
+        'static_website': StaticWebsite._from_generated(generated.static_website),  # pylint: disable=protected-access
+    }
+
+
+def get_blob_properties_from_generated_code(generated):
+    blob = BlobProperties()
+    blob.name = generated.name
+    blob_type = get_enum_value(generated.properties.blob_type)
+    blob.blob_type = BlobType(blob_type) if blob_type else None
+    blob.etag = generated.properties.etag
+    blob.deleted = generated.deleted
+    blob.snapshot = generated.snapshot
+    blob.is_append_blob_sealed = generated.properties.is_sealed
+    blob.metadata = generated.metadata.additional_properties if generated.metadata else {}
+    blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None
+    blob.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
+    blob.copy = CopyProperties._from_generated(generated)  # pylint: disable=protected-access
+    blob.last_modified = generated.properties.last_modified
+    blob.creation_time = generated.properties.creation_time
+    blob.content_settings = ContentSettings._from_generated(generated)  # pylint: disable=protected-access
+    blob.size = generated.properties.content_length
+    blob.page_blob_sequence_number = generated.properties.blob_sequence_number
+    blob.server_encrypted = generated.properties.server_encrypted
+    blob.encryption_scope = generated.properties.encryption_scope
+    blob.deleted_time = generated.properties.deleted_time
+    blob.remaining_retention_days = generated.properties.remaining_retention_days
+    blob.blob_tier = generated.properties.access_tier
+    blob.rehydrate_priority = generated.properties.rehydrate_priority
+    blob.blob_tier_inferred = generated.properties.access_tier_inferred
+    blob.archive_status = generated.properties.archive_status
+    blob.blob_tier_change_time = generated.properties.access_tier_change_time
+    blob.version_id = generated.version_id
+    blob.is_current_version = generated.is_current_version
+    blob.tag_count = generated.properties.tag_count
+    blob.tags = parse_tags(generated.blob_tags)
+    blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata)
+    blob.last_accessed_on = generated.properties.last_accessed_on
+    blob.immutability_policy = ImmutabilityPolicy._from_generated(generated)  # pylint: disable=protected-access
+    blob.has_legal_hold = generated.properties.legal_hold
+    blob.has_versions_only = generated.has_versions_only
+    return blob
+
+
+def parse_tags(generated_tags):
+    # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None]
+    """Deserialize a list of BlobTag objects into a 
+    dict.
+    """
+    if generated_tags:
+        tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set}
+        return tag_dict
+    return None
diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_download.py b/azure/multiapi/storagev2/blob/v2020_10_02/_download.py
new file mode 100644
index 0000000..05bdbd0
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_10_02/_download.py
@@ -0,0 +1,636 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+import threading
+import time
+
+import warnings
+from io import BytesIO
+from typing import Iterator
+
+import requests
+from azure.core.exceptions import HttpResponseError, ServiceResponseError
+
+from azure.core.tracing.common import with_current_context
+from ._shared.encryption import decrypt_blob
+from ._shared.request_handlers import validate_and_format_range_headers
+from ._shared.response_handlers import process_storage_error, parse_length_from_content_range
+from ._deserialize import get_page_ranges_result
+
+
+def process_range_and_offset(start_range, end_range, length, encryption):
+    start_offset, end_offset = 0, 0
+    if encryption.get("key") is not None or encryption.get("resolver") is not None:
+        if start_range is not None:
+            # Align the start of the range along a 16 byte block
+            start_offset = start_range % 16
+            start_range -= start_offset
+
+            # Include an extra 16 bytes for the IV if necessary.
+            # Because of the previous offsetting, start_range will always
+            # be a multiple of 16.
+            if start_range > 0:
+                start_offset += 16
+                start_range -= 16
+
+        if length is not None:
+            # Align the end of the range along a 16 byte block
+            end_offset = 15 - (end_range % 16)
+            end_range += end_offset
+
+    return (start_range, end_range), (start_offset, end_offset)
+
+
+def process_content(data, start_offset, end_offset, encryption):
+    if data is None:
+        raise ValueError("Response cannot be None.")
+
+    content = b"".join(list(data))
+
+    # Decrypt only when there is content and client-side encryption is configured.
+    if content and (encryption.get("key") is not None or encryption.get("resolver") is not None):
+        try:
+            return decrypt_blob(
+                encryption.get("required"),
+                encryption.get("key"),
+                encryption.get("resolver"),
+                content,
+                start_offset,
+                end_offset,
+                data.response.headers,
+            )
+        except Exception as error:
+            raise HttpResponseError(message="Decryption failed.", response=data.response, error=error)
+    return content
+
+
+class _ChunkDownloader(object):  # pylint: disable=too-many-instance-attributes
+    def __init__(
+        self,
+        client=None,
+        non_empty_ranges=None,
+        total_size=None,
+        chunk_size=None,
+        current_progress=None,
+        start_range=None,
+        end_range=None,
+        stream=None,
+        parallel=None,
+        validate_content=None,
+        encryption_options=None,
+        **kwargs
+    ):
+        self.client = client
+        self.non_empty_ranges = non_empty_ranges
+
+        # Information on the download range/chunk size
+        self.chunk_size = chunk_size
+        self.total_size = total_size
+        self.start_index = start_range
+        self.end_index = end_range
+
+        # The destination that we will write to
+        self.stream = stream
+        self.stream_lock = threading.Lock() if parallel else None
+        self.progress_lock = threading.Lock() if parallel else None
+
+        # For a parallel download, the stream is always seekable, so we note down the current position
+        # in order to seek to the right place when out-of-order chunks come in
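+        # A worked example of the arithmetic used in _write_to_stream below
+        # (the numbers are illustrative): with stream_start == 0 and
+        # start_index == 4 MiB, a chunk at absolute offset 8 MiB is written at
+        # stream_start + (8 MiB - 4 MiB) = 4 MiB, so chunks may finish in any
+        # order without corrupting the output.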
+        self.stream_start = stream.tell() if parallel else None
+
+        # Download progress so far
+        self.progress_total = current_progress
+
+        # Encryption
+        self.encryption_options = encryption_options
+
+        # Parameters for each get operation
+        self.validate_content = validate_content
+        self.request_options = kwargs
+
+    def _calculate_range(self, chunk_start):
+        if chunk_start + self.chunk_size > self.end_index:
+            chunk_end = self.end_index
+        else:
+            chunk_end = chunk_start + self.chunk_size
+        return chunk_start, chunk_end
+
+    def get_chunk_offsets(self):
+        index = self.start_index
+        while index < self.end_index:
+            yield index
+            index += self.chunk_size
+
+    def process_chunk(self, chunk_start):
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        chunk_data = self._download_chunk(chunk_start, chunk_end - 1)
+        length = chunk_end - chunk_start
+        if length > 0:
+            self._write_to_stream(chunk_data, chunk_start)
+            self._update_progress(length)
+
+    def yield_chunk(self, chunk_start):
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        return self._download_chunk(chunk_start, chunk_end - 1)
+
+    def _update_progress(self, length):
+        if self.progress_lock:
+            with self.progress_lock:  # pylint: disable=not-context-manager
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+    def _write_to_stream(self, chunk_data, chunk_start):
+        if self.stream_lock:
+            with self.stream_lock:  # pylint: disable=not-context-manager
+                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+                self.stream.write(chunk_data)
+        else:
+            self.stream.write(chunk_data)
+
+    def _do_optimize(self, given_range_start, given_range_end):
+        # If we have no page range list stored, then assume there's data everywhere for that page blob
+        # or it's a block blob or append blob
+        if self.non_empty_ranges is None:
+            return False
+
+        for source_range in self.non_empty_ranges:
+            # Case 1: The range list is sorted, so once the given range ends before a
+            # source_range starts, every earlier source_range has already been checked
+            # without finding an overlap. The given range therefore contains no data
+            # and the download optimization can be applied.
+            # given range:  |   |
+            # source range:        |   |
+            if given_range_end < source_range['start']:  # pylint:disable=no-else-return
+                return True
+            # Case 2: the given range comes after source_range, continue checking.
+            # given range:         |   |
+            # source range: |   |
+            elif source_range['end'] < given_range_start:
+                pass
+            # Case 3: source_range and given range overlap somehow, no need to optimize.
+            else:
+                return False
+        # Went through all source_ranges, but nothing overlapped. Optimization will be applied.
+        return True
+
+    def _download_chunk(self, chunk_start, chunk_end):
+        download_range, offset = process_range_and_offset(
+            chunk_start, chunk_end, chunk_end, self.encryption_options
+        )
+
+        # No need to download the empty chunk from server if there's no data in the chunk to be downloaded.
+        # Do optimize and create empty chunk locally if condition is met.
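+        # An illustrative case: for a sparse page blob with
+        # non_empty_ranges == [{'start': 0, 'end': 511}], a chunk covering
+        # bytes 1024-2047 overlaps no source range, so _do_optimize returns
+        # True and the chunk is synthesized locally as zeros instead of being
+        # fetched from the service.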
+        if self._do_optimize(download_range[0], download_range[1]):
+            chunk_data = b"\x00" * self.chunk_size
+        else:
+            range_header, range_validation = validate_and_format_range_headers(
+                download_range[0],
+                download_range[1],
+                check_content_md5=self.validate_content
+            )
+
+            retry_active = True
+            retry_total = 3
+            while retry_active:
+                try:
+                    _, response = self.client.download(
+                        range=range_header,
+                        range_get_content_md5=range_validation,
+                        validate_content=self.validate_content,
+                        data_stream_total=self.total_size,
+                        download_stream_current=self.progress_total,
+                        **self.request_options
+                    )
+                except HttpResponseError as error:
+                    process_storage_error(error)
+
+                try:
+                    chunk_data = process_content(response, offset[0], offset[1], self.encryption_options)
+                    retry_active = False
+                except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError) as error:
+                    retry_total -= 1
+                    if retry_total <= 0:
+                        raise ServiceResponseError(error, error=error)
+                    time.sleep(1)
+
+            # This makes sure that if_match is set so that we can validate
+            # that subsequent downloads are to an unmodified blob
+            if self.request_options.get("modified_access_conditions"):
+                self.request_options["modified_access_conditions"].if_match = response.properties.etag
+
+        return chunk_data
+
+
+class _ChunkIterator(object):
+    """Iterator for chunks in a blob download stream."""
+
+    def __init__(self, size, content, downloader, chunk_size):
+        self.size = size
+        self._chunk_size = chunk_size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks = None
+        self._complete = (size == 0)
+
+    def __len__(self):
+        return self.size
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        """Iterate through chunks."""
+        if self._complete:
+            raise StopIteration("Download complete")
+        if not self._iter_downloader:
+            # cut the data obtained from the initial GET into chunks
+            if len(self._current_content) > self._chunk_size:
+                return self._get_chunk_data()
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+        # initial GET result still has at least _chunk_size bytes of data
+        if len(self._current_content) >= self._chunk_size:
+            return self._get_chunk_data()
+
+        try:
+            chunk = next(self._iter_chunks)
+            self._current_content += self._iter_downloader.yield_chunk(chunk)
+        except StopIteration as e:
+            self._complete = True
+            if self._current_content:
+                return self._current_content
+            raise e
+
+        # the current content from the first get is still there but smaller than the chunk size,
+        # so we want to make sure it is also included
+        return self._get_chunk_data()
+
+    next = __next__  # Python 2 compatibility.
+
+    def _get_chunk_data(self):
+        chunk_data = self._current_content[: self._chunk_size]
+        self._current_content = self._current_content[self._chunk_size:]
+        return chunk_data
+
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the blob being downloaded.
+    :ivar str container:
+        The name of the container where the blob is.
+    :ivar ~azure.storage.blob.BlobProperties properties:
+        The properties of the blob being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the blob.
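+
+    A minimal usage sketch (assuming an existing ``BlobClient`` named
+    ``client``; the variable names are illustrative)::
+
+        downloader = client.download_blob()
+        print(downloader.properties.size)
+        data = downloader.readall()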
+ """ + + def __init__( + self, + clients=None, + config=None, + start_range=None, + end_range=None, + validate_content=None, + encryption_options=None, + max_concurrency=1, + name=None, + container=None, + encoding=None, + **kwargs + ): + self.name = name + self.container = container + self.properties = None + self.size = None + + self._clients = clients + self._config = config + self._start_range = start_range + self._end_range = end_range + self._max_concurrency = max_concurrency + self._encoding = encoding + self._validate_content = validate_content + self._encryption_options = encryption_options or {} + self._request_options = kwargs + self._location_mode = None + self._download_complete = False + self._current_content = None + self._file_size = None + self._non_empty_ranges = None + self._response = None + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. + self._first_get_size = ( + self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size + ) + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, initial_request_end, self._end_range, self._encryption_options + ) + + self._response = self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.container = self.container + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = "bytes {0}-{1}/{2}".format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + def __len__(self): + return self.size + + def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content + ) + + retry_active = True + retry_total = 3 + while retry_active: + try: + location_mode, response = self._clients.blob.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options + ) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. 
+ self._location_mode = location_mode + + # Parse the total file size and adjust the download size if ranges + # were specified + self._file_size = parse_length_from_content_range(response.properties.content_range) + if self._end_range is not None: + # Use the end range index unless it is over the end of the file + self.size = min(self._file_size, self._end_range - self._start_range + 1) + elif self._start_range is not None: + self.size = self._file_size - self._start_range + else: + self.size = self._file_size + + except HttpResponseError as error: + if self._start_range is None and error.response.status_code == 416: + # Get range will fail on an empty file. If the user did not + # request a range, do a regular get request in order to get + # any properties. + try: + _, response = self._clients.blob.download( + validate_content=self._validate_content, + data_stream_total=0, + download_stream_current=0, + **self._request_options + ) + except HttpResponseError as error: + process_storage_error(error) + + # Set the download size to empty + self.size = 0 + self._file_size = 0 + else: + process_storage_error(error) + + try: + if self.size == 0: + self._current_content = b"" + else: + self._current_content = process_content( + response, + self._initial_offset[0], + self._initial_offset[1], + self._encryption_options + ) + retry_active = False + except (requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError) as error: + retry_total -= 1 + if retry_total <= 0: + raise ServiceResponseError(error, error=error) + time.sleep(1) + + # get page ranges to optimize downloading sparse page blob + if response.properties.blob_type == 'PageBlob': + try: + page_ranges = self._clients.page_blob.get_page_ranges() + self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] + # according to the REST API documentation: + # in a highly fragmented page blob with a large number of writes, + # a Get Page Ranges request can fail due to an internal server timeout. + # thus, if the page blob is not sparse, it's ok for it to fail + except HttpResponseError: + pass + + # If the file is small, the download is complete at this point. + # If file size is large, download the rest of the file in chunks. + if response.properties.size != self.size: + if self._request_options.get("modified_access_conditions"): + self._request_options["modified_access_conditions"].if_match = response.properties.etag + else: + self._download_complete = True + return response + + def chunks(self): + # type: () -> Iterator[bytes] + """Iterate over chunks in the download stream. + + :rtype: Iterator[bytes] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START download_a_blob_in_chunk] + :end-before: [END download_a_blob_in_chunk] + :language: python + :dedent: 12 + :caption: Download a blob using chunks(). 
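+
+        A minimal sketch (assuming an existing ``BlobClient`` named
+        ``client``; the output file name is illustrative)::
+
+            downloader = client.download_blob()
+            with open("out.bin", "wb") as handle:
+                for chunk in downloader.chunks():
+                    handle.write(chunk)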
+ """ + if self.size == 0 or self._download_complete: + iter_downloader = None + else: + data_end = self._file_size + if self._end_range is not None: + # Use the end range index unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + iter_downloader = _ChunkDownloader( + client=self._clients.blob, + non_empty_ranges=self._non_empty_ranges, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # start where the first download ended + end_range=data_end, + stream=None, + parallel=False, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + **self._request_options + ) + return _ChunkIterator( + size=self.size, + content=self._current_content, + downloader=iter_downloader, + chunk_size=self._config.max_chunk_get_size) + + def readall(self): + """Download the contents of this blob. + + This operation is blocking until all data is downloaded. + + :rtype: bytes or str + """ + stream = BytesIO() + self.readinto(stream) + data = stream.getvalue() + if self._encoding: + return data.decode(self._encoding) + return data + + def content_as_bytes(self, max_concurrency=1): + """Download the contents of this file. + + This operation is blocking until all data is downloaded. + + :keyword int max_concurrency: + The number of parallel connections with which to download. + :rtype: bytes + """ + warnings.warn( + "content_as_bytes is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + return self.readall() + + def content_as_text(self, max_concurrency=1, encoding="UTF-8"): + """Download the contents of this blob, and decode as text. + + This operation is blocking until all data is downloaded. + + :keyword int max_concurrency: + The number of parallel connections with which to download. + :param str encoding: + Test encoding to decode the downloaded bytes. Default is UTF-8. + :rtype: str + """ + warnings.warn( + "content_as_text is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self._encoding = encoding + return self.readall() + + def readinto(self, stream): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + # The stream must be seekable if parallel download is required + parallel = self._max_concurrency > 1 + if parallel: + error_message = "Target stream handle must be seekable." 
+            if sys.version_info >= (3,) and not stream.seekable():
+                raise ValueError(error_message)
+
+            try:
+                stream.seek(stream.tell())
+            except (NotImplementedError, AttributeError):
+                raise ValueError(error_message)
+
+        # Write the content to the user stream
+        stream.write(self._current_content)
+        if self._download_complete:
+            return self.size
+
+        data_end = self._file_size
+        if self._end_range is not None:
+            # Use the length unless it is over the end of the file
+            data_end = min(self._file_size, self._end_range + 1)
+
+        downloader = _ChunkDownloader(
+            client=self._clients.blob,
+            non_empty_ranges=self._non_empty_ranges,
+            total_size=self.size,
+            chunk_size=self._config.max_chunk_get_size,
+            current_progress=self._first_get_size,
+            start_range=self._initial_range[1] + 1,  # Start where the first download ended
+            end_range=data_end,
+            stream=stream,
+            parallel=parallel,
+            validate_content=self._validate_content,
+            encryption_options=self._encryption_options,
+            use_location=self._location_mode,
+            **self._request_options
+        )
+        if parallel:
+            import concurrent.futures
+            with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor:
+                list(executor.map(
+                    with_current_context(downloader.process_chunk),
+                    downloader.get_chunk_offsets()
+                ))
+        else:
+            for chunk in downloader.get_chunk_offsets():
+                downloader.process_chunk(chunk)
+        return self.size
+
+    def download_to_stream(self, stream, max_concurrency=1):
+        """Download the contents of this blob to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :returns: The properties of the downloaded blob.
+        :rtype: Any
+        """
+        warnings.warn(
+            "download_to_stream is deprecated, use readinto instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        self.readinto(stream)
+        return self.properties
diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/__init__.py
new file mode 100644
index 0000000..cc760e7
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/__init__.py
@@ -0,0 +1,16 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._azure_blob_storage import AzureBlobStorage
+__all__ = ['AzureBlobStorage']
+
+try:
+    from ._patch import patch_sdk  # type: ignore
+    patch_sdk()
+except ImportError:
+    pass
diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_azure_blob_storage.py
new file mode 100644
index 0000000..d93c6ec
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_azure_blob_storage.py
@@ -0,0 +1,111 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.core import PipelineClient +from msrest import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + + from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from ._configuration import AzureBlobStorageConfiguration +from .operations import ServiceOperations +from .operations import ContainerOperations +from .operations import DirectoryOperations +from .operations import BlobOperations +from .operations import PageBlobOperations +from .operations import AppendBlobOperations +from .operations import BlockBlobOperations +from . import models + + +class AzureBlobStorage(object): + """AzureBlobStorage. + + :ivar service: ServiceOperations operations + :vartype service: azure.storage.blob.operations.ServiceOperations + :ivar container: ContainerOperations operations + :vartype container: azure.storage.blob.operations.ContainerOperations + :ivar directory: DirectoryOperations operations + :vartype directory: azure.storage.blob.operations.DirectoryOperations + :ivar blob: BlobOperations operations + :vartype blob: azure.storage.blob.operations.BlobOperations + :ivar page_blob: PageBlobOperations operations + :vartype page_blob: azure.storage.blob.operations.PageBlobOperations + :ivar append_blob: AppendBlobOperations operations + :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations + :ivar block_blob: BlockBlobOperations operations + :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations + :param url: The URL of the service account, container, or blob that is the target of the desired operation. + :type url: str + """ + + def __init__( + self, + url, # type: str + **kwargs # type: Any + ): + # type: (...) -> None + base_url = '{url}' + self._config = AzureBlobStorageConfiguration(url, **kwargs) + self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._serialize.client_side_validation = False + self._deserialize = Deserializer(client_models) + + self.service = ServiceOperations( + self._client, self._config, self._serialize, self._deserialize) + self.container = ContainerOperations( + self._client, self._config, self._serialize, self._deserialize) + self.directory = DirectoryOperations( + self._client, self._config, self._serialize, self._deserialize) + self.blob = BlobOperations( + self._client, self._config, self._serialize, self._deserialize) + self.page_blob = PageBlobOperations( + self._client, self._config, self._serialize, self._deserialize) + self.append_blob = AppendBlobOperations( + self._client, self._config, self._serialize, self._deserialize) + self.block_blob = BlockBlobOperations( + self._client, self._config, self._serialize, self._deserialize) + + def _send_request(self, http_request, **kwargs): + # type: (HttpRequest, Any) -> HttpResponse + """Runs the network request through the client's chained policies. + + :param http_request: The network request you want to make. Required. + :type http_request: ~azure.core.pipeline.transport.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to True. + :return: The response of your network call. 
Does not do error handling on your response. + :rtype: ~azure.core.pipeline.transport.HttpResponse + """ + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + http_request.url = self._client.format_url(http_request.url, **path_format_arguments) + stream = kwargs.pop("stream", True) + pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) + return pipeline_response.http_response + + def close(self): + # type: () -> None + self._client.close() + + def __enter__(self): + # type: () -> AzureBlobStorage + self._client.__enter__() + return self + + def __exit__(self, *exc_details): + # type: (Any) -> None + self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_configuration.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_configuration.py new file mode 100644 index 0000000..1c0d9c8 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/_configuration.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + +VERSION = "unknown" + +class AzureBlobStorageConfiguration(Configuration): + """Configuration for AzureBlobStorage. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param url: The URL of the service account, container, or blob that is the target of the desired operation. + :type url: str + """ + + def __init__( + self, + url, # type: str + **kwargs # type: Any + ): + # type: (...) -> None + if url is None: + raise ValueError("Parameter 'url' must not be None.") + super(AzureBlobStorageConfiguration, self).__init__(**kwargs) + + self.url = url + self.version = "2020-10-02" + kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs # type: Any + ): + # type: (...) 
-> None + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/__init__.py new file mode 100644 index 0000000..12cfcf6 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._azure_blob_storage import AzureBlobStorage +__all__ = ['AzureBlobStorage'] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_azure_blob_storage.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_azure_blob_storage.py new file mode 100644 index 0000000..b945951 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_azure_blob_storage.py @@ -0,0 +1,101 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core import AsyncPipelineClient +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from msrest import Deserializer, Serializer + +from ._configuration import AzureBlobStorageConfiguration +from .operations import ServiceOperations +from .operations import ContainerOperations +from .operations import DirectoryOperations +from .operations import BlobOperations +from .operations import PageBlobOperations +from .operations import AppendBlobOperations +from .operations import BlockBlobOperations +from .. import models + + +class AzureBlobStorage(object): + """AzureBlobStorage. 
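+
+    A minimal async usage sketch (the account URL is illustrative)::
+
+        async with AzureBlobStorage("https://myaccount.blob.core.windows.net") as client:
+            ...  # call operations via client.service, client.container, client.blob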
+ + :ivar service: ServiceOperations operations + :vartype service: azure.storage.blob.aio.operations.ServiceOperations + :ivar container: ContainerOperations operations + :vartype container: azure.storage.blob.aio.operations.ContainerOperations + :ivar directory: DirectoryOperations operations + :vartype directory: azure.storage.blob.aio.operations.DirectoryOperations + :ivar blob: BlobOperations operations + :vartype blob: azure.storage.blob.aio.operations.BlobOperations + :ivar page_blob: PageBlobOperations operations + :vartype page_blob: azure.storage.blob.aio.operations.PageBlobOperations + :ivar append_blob: AppendBlobOperations operations + :vartype append_blob: azure.storage.blob.aio.operations.AppendBlobOperations + :ivar block_blob: BlockBlobOperations operations + :vartype block_blob: azure.storage.blob.aio.operations.BlockBlobOperations + :param url: The URL of the service account, container, or blob that is the target of the desired operation. + :type url: str + """ + + def __init__( + self, + url: str, + **kwargs: Any + ) -> None: + base_url = '{url}' + self._config = AzureBlobStorageConfiguration(url, **kwargs) + self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._serialize.client_side_validation = False + self._deserialize = Deserializer(client_models) + + self.service = ServiceOperations( + self._client, self._config, self._serialize, self._deserialize) + self.container = ContainerOperations( + self._client, self._config, self._serialize, self._deserialize) + self.directory = DirectoryOperations( + self._client, self._config, self._serialize, self._deserialize) + self.blob = BlobOperations( + self._client, self._config, self._serialize, self._deserialize) + self.page_blob = PageBlobOperations( + self._client, self._config, self._serialize, self._deserialize) + self.append_blob = AppendBlobOperations( + self._client, self._config, self._serialize, self._deserialize) + self.block_blob = BlockBlobOperations( + self._client, self._config, self._serialize, self._deserialize) + + async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: + """Runs the network request through the client's chained policies. + + :param http_request: The network request you want to make. Required. + :type http_request: ~azure.core.pipeline.transport.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to True. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse + """ + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + http_request.url = self._client.format_url(http_request.url, **path_format_arguments) + stream = kwargs.pop("stream", True) + pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) + return pipeline_response.http_response + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "AzureBlobStorage": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details) -> None: + await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_configuration.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_configuration.py new file mode 100644 index 0000000..bb5c749 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/_configuration.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +VERSION = "unknown" + +class AzureBlobStorageConfiguration(Configuration): + """Configuration for AzureBlobStorage. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param url: The URL of the service account, container, or blob that is the target of the desired operation. 
+ :type url: str + """ + + def __init__( + self, + url: str, + **kwargs: Any + ) -> None: + if url is None: + raise ValueError("Parameter 'url' must not be None.") + super(AzureBlobStorageConfiguration, self).__init__(**kwargs) + + self.url = url + self.version = "2020-10-02" + kwargs.setdefault('sdk_moniker', 'azureblobstorage/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ) -> None: + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/__init__.py new file mode 100644 index 0000000..62f85c9 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/__init__.py @@ -0,0 +1,25 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._container_operations import ContainerOperations +from ._directory_operations import DirectoryOperations +from ._blob_operations import BlobOperations +from ._page_blob_operations import PageBlobOperations +from ._append_blob_operations import AppendBlobOperations +from ._block_blob_operations import BlockBlobOperations + +__all__ = [ + 'ServiceOperations', + 'ContainerOperations', + 'DirectoryOperations', + 'BlobOperations', + 'PageBlobOperations', + 'AppendBlobOperations', + 'BlockBlobOperations', +] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_append_blob_operations.py new file mode 100644 index 0000000..4d18668 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_append_blob_operations.py @@ -0,0 +1,726 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class AppendBlobOperations: + """AppendBlobOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + content_length: int, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Create Append Blob operation creates a new append blob. + + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. 
+ :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. + :type legal_hold: bool + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + blob_type = "AppendBlob" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = 
self._serialize.header("blob_type", blob_type, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + if legal_hold is not None: + 
header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def append_block( + self, + content_length: int, + body: IO, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytearray] = None, + transactional_content_crc64: Optional[bytearray] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Append Block operation commits a new block of data to the end of an existing append blob. + The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to + AppendBlob. Append Block is supported only on version 2015-02-21 version or later. + + :param content_length: The length of the request. + :type content_length: long + :param body: Initial data. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. 
+ :type transactional_content_crc64: bytearray + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Parameter group. + :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _max_size = None + _append_position = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if append_position_access_conditions is not None: + _max_size = append_position_access_conditions.max_size + _append_position = append_position_access_conditions.append_position + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "appendblock" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.append_block.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + 
header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + append_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def append_block_from_url( + self, + source_url: str, + content_length: int, + source_range: Optional[str] = None, + source_content_md5: Optional[bytearray] = None, + source_contentcrc64: Optional[bytearray] = None, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytearray] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Append Block operation commits a new block of data to the end of an existing append blob + where the contents are read from a source url. The Append Block operation is permitted only if + the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on + version 2015-02-21 version or later. + + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param content_length: The length of the request. + :type content_length: long + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. 
+ :type copy_source_authorization: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Parameter group. + :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _max_size = None + _append_position = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if append_position_access_conditions is not None: + _max_size = append_position_access_conditions.max_size + _append_position = append_position_access_conditions.append_position + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "appendblock" + accept = "application/xml" + + # Construct URL + url = self.append_block_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # 
Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = 
self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if copy_source_authorization is not None: + header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, response_headers) + + append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def seal( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + append_position_access_conditions: Optional["_models.AppendPositionAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on + version 2019-12-12 version or later. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param append_position_access_conditions: Parameter group. + :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _append_position = None + if append_position_access_conditions is not None: + _append_position = append_position_access_conditions.append_position + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + comp = "seal" + accept = "application/xml" + + # Construct URL + url = self.seal.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + + if cls: + return cls(pipeline_response, None, response_headers) + + seal.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_blob_operations.py new file mode 100644 index 0000000..f2d0642 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_blob_operations.py @@ -0,0 +1,3425 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class BlobOperations: + """BlobOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
+ """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def download( + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + range_get_content_md5: Optional[bool] = None, + range_get_content_crc64: Optional[bool] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> IO: + """The Download operation reads or downloads a blob from the system, including its metadata and + properties. You can also call Download to read a snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param range_get_content_md5: When set to true and specified together with the Range, the + service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB + in size. + :type range_get_content_md5: bool + :param range_get_content_crc64: When set to true and specified together with the Range, the + service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 + MB in size. + :type range_get_content_crc64: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.download.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if range_get_content_crc64 is not None: + header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + 
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + 
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) + response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) + response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) + response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', 
response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) + response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) + response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) + response_headers['x-ms-legal-hold']=self._deserialize('bool', 
response.headers.get('x-ms-legal-hold')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def get_properties( + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if 
request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) + response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) + response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) + response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) + response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) + response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) + response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) + response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def delete( + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, + request_id_parameter: Optional[str] = None, + blob_delete_type: Optional[str] = "Permanent", + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is + permanently removed from the storage account. 
If the storage account's soft delete feature is + enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible + immediately. However, the blob service retains the blob or snapshot for the number of days + specified by the DeleteRetentionPolicy section of [Storage service properties] + (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's + data is permanently removed from the storage account. Note that you continue to be charged for + the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and + specify the "include=deleted" query parameter to discover which blobs and snapshots have been + soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other + operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code + of 404 (ResourceNotFound). + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the + following two options: include: Delete the base blob and all of its snapshots. only: Delete + only the blob's snapshots and not the blob itself. + :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to + permanently delete a blob if blob soft delete is enabled. + :type blob_delete_type: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if blob_delete_type is not None: + query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in 
[202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_access_control( + self, + timeout: Optional[int] = None, + owner: Optional[str] = None, + group: Optional[str] = None, + posix_permissions: Optional[str] = None, + posix_acl: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Set the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and directories. The value is a + comma-separated list of access control entries. Each access control entry (ACE) consists of a + scope, a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "setAccessControl" + accept = "application/xml" + + # Construct URL + url = self.set_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def get_access_control( + self, + timeout: Optional[int] = None, + upn: Optional[bool] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Get the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If + "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response + headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If + "false", the values will be returned as Azure Active Directory Object IDs. The default value is + false. + :type upn: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "getAccessControl" + accept = "application/xml" + + # Construct URL + url = self.get_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
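+ # (Editor's note: illustrative sketch, not part of the generated client.) The
+ # POSIX identity headers deserialized here are the only payload of this
+ # operation, so a caller typically captures them through the optional ``cls``
+ # callback that every generated operation accepts; ``_pick_acl_headers`` and
+ # ``path_ops`` below are hypothetical names:
+ #
+ #     def _pick_acl_headers(pipeline_response, deserialized, response_headers):
+ #         # keep just the owner/group/ACL triplet
+ #         return {k: response_headers.get(k)
+ #                 for k in ('x-ms-owner', 'x-ms-group', 'x-ms-acl')}
+ #
+ #     acl_info = await path_ops.get_access_control(upn=True, cls=_pick_acl_headers)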
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner'))
+ response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group'))
+ response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions'))
+ response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
+
+ async def rename(
+ self,
+ rename_source: str,
+ timeout: Optional[int] = None,
+ path_rename_mode: Optional[Union[str, "_models.PathRenameMode"]] = None,
+ directory_properties: Optional[str] = None,
+ posix_permissions: Optional[str] = None,
+ posix_umask: Optional[str] = None,
+ source_lease_id: Optional[str] = None,
+ request_id_parameter: Optional[str] = None,
+ directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None,
+ **kwargs: Any
+ ) -> None:
+ """Rename a blob/file. By default, the destination is overwritten and, if the destination
+ already exists and has a lease, the lease is broken. This operation supports conditional HTTP
+ requests. For more information, see `Specifying Conditional Headers for Blob Service Operations
+ <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+ To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+
+ :param rename_source: The file or directory to be renamed. The value must have the following
+ format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties will
+ overwrite the existing properties; otherwise, the existing properties will be preserved.
+ :type rename_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param path_rename_mode: Determines the behavior of the rename operation.
+ :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode
+ :param directory_properties: Optional. User-defined properties to be stored with the file or
+ directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...",
+ where each value is base64 encoded.
+ :type directory_properties: str
+ :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+ account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+ Each class may be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ :type posix_permissions: str
+ :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask
+ restricts permission settings for the file or directory and is applied only when no default ACL
+ exists in the parent directory. If a umask bit is set, the corresponding
+ permission is disabled; otherwise, the corresponding permission is determined by the
+ permission setting. A 4-digit octal notation (e.g. 0022) is supported here. If no umask is
+ specified, a default umask of 0027 is used.
+ :type posix_umask: str
+ :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+ an active lease and the lease ID must match.
+ :type source_lease_id: str
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param directory_http_headers: Parameter group.
+ :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Parameter group.
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _cache_control = None
+ _content_type = None
+ _content_encoding = None
+ _content_language = None
+ _content_disposition = None
+ _lease_id = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ _if_match = None
+ _if_none_match = None
+ _source_if_modified_since = None
+ _source_if_unmodified_since = None
+ _source_if_match = None
+ _source_if_none_match = None
+ if directory_http_headers is not None:
+ _cache_control = directory_http_headers.cache_control
+ _content_type = directory_http_headers.content_type
+ _content_encoding = directory_http_headers.content_encoding
+ _content_language = directory_http_headers.content_language
+ _content_disposition = directory_http_headers.content_disposition
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ if source_modified_access_conditions is not None:
+ _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+ _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+ _source_if_match = source_modified_access_conditions.source_if_match
+ _source_if_none_match = source_modified_access_conditions.source_if_none_match
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.rename.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+
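+ # (Illustrative note based on the construction below; the concrete endpoint is
+ # whatever self._config.url points at.) The rename is issued as a PUT against
+ # the *destination* path, with the source carried in a header rather than in
+ # the URL, roughly:
+ #
+ #     PUT {endpoint}/{filesystem}/{destination-path}?mode=legacy
+ #     x-ms-rename-source: /{filesystem}/{source-path}
+ #
+ # A 201 Created response, checked further down, signals a successful rename.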
if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, 
header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def undelete( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """Undelete a blob that was previously soft deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "undelete" + accept = "application/xml" + + # Construct URL + url = self.undelete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + undelete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_expiry( + self, + expiry_options: Union[str, "_models.BlobExpiryOptions"], + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + expires_on: Optional[str] = None, + **kwargs: Any + ) -> None: + """Sets the time a blob will expire and be deleted. + + :param expiry_options: Required. Indicates mode of the expiry time. + :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param expires_on: The time to set the blob to expiry. 
+ :type expires_on: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "expiry" + accept = "application/xml" + + # Construct URL + url = self.set_expiry.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') + if expires_on is not None: + header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_http_headers( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Set HTTP Headers operation sets system properties on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_cache_control = None + _blob_content_type = None + _blob_content_md5 = None + _blob_content_encoding = None + _blob_content_language = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _blob_content_disposition = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_disposition = blob_http_headers.blob_content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.set_http_headers.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language 
is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_immutability_policy( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Set Immutability Policy operation sets the immutability policy on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "immutabilityPolicies" + accept = "application/xml" + + # Construct URL + url = self.set_immutability_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + 
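+ # (Hedged usage sketch; ``blob_ops`` stands in for an instance of this
+ # operations class.) Because the expiry is serialized as an rfc-1123 header
+ # above, a timezone-aware datetime is the natural input; "Unlocked" is one of
+ # the documented policy-mode values (see _models.BlobImmutabilityPolicyMode):
+ #
+ #     import datetime
+ #
+ #     until = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=7)
+ #     await blob_ops.set_immutability_policy(
+ #         immutability_policy_expiry=until,
+ #         immutability_policy_mode="Unlocked",
+ #     )
+ #
+ # The service echoes the effective policy back in the
+ # x-ms-immutability-policy-until-date and x-ms-immutability-policy-mode
+ # headers deserialized below.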
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) + response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_immutability_policy.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def delete_immutability_policy( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Delete Immutability Policy operation deletes the immutability policy on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "immutabilityPolicies" + accept = "application/xml" + + # Construct URL + url = self.delete_immutability_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
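+ # (Illustrative, non-authoritative sketch.) This operation is the inverse of
+ # set_immutability_policy above and needs no arguments beyond the optional
+ # timeout and request id:
+ #
+ #     await blob_ops.delete_immutability_policy()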
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete_immutability_policy.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_legal_hold( + self, + legal_hold: bool, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Set Legal Hold operation sets a legal hold on the blob. + + :param legal_hold: Specified if a legal hold should be set on the blob. + :type legal_hold: bool + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "legalhold" + accept = "application/xml" + + # Construct URL + url = self.set_legal_hold.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + 
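+ # (Sketch assuming ``blob_ops`` is an instance of this operations class.) The
+ # resulting hold state comes back in the x-ms-legal-hold header deserialized
+ # just below; the ``cls`` callback makes the round trip visible:
+ #
+ #     held = await blob_ops.set_legal_hold(
+ #         legal_hold=True,
+ #         cls=lambda _, __, headers: headers['x-ms-legal-hold'],
+ #     )
+ #     assert held is True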
response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_legal_hold.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_metadata( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or + more name-value pairs. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 
'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def acquire_lease( + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "acquire" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) 
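+ # Descriptive note: map_error raises the typed azure-core exception
+ # registered in error_map above (401/404/409); any other unexpected
+ # status falls through to the generic HttpResponseError below.
+ # failsafe_deserialize returns None rather than raising when the XML
+ # error body cannot be parsed, so the attached `model` may be None.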
+ raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def release_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "release" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def renew_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "renew" + accept = "application/xml" + + # Construct URL + url = self.renew_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", 
_if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def change_lease( + self, + lease_id: str, + proposed_lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "change" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def break_lease( + self, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease + breaks immediately. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "break" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def create_snapshot( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + request_id_parameter: Optional[str] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Create Snapshot operation creates a read-only snapshot of a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "snapshot" + accept = "application/xml" + + # Construct URL + url = self.create_snapshot.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if 
_lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create_snapshot.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def start_copy_from_url( + self, + copy_source: str, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + seal_blob: Optional[bool] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Start Copy From URL operation copies a blob or an internet resource to a new blob. + + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+ If no name-value pairs are specified, the operation will copy the metadata from the source blob
+ or file to the destination blob. If one or more name-value pairs are specified, the destination
+ blob is created with the specified metadata, and metadata is not copied from the source blob or
+ file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+ rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+ information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param rehydrate_priority: Optional. Indicates the priority with which to rehydrate an archived
+ blob.
+ :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param blob_tags_string: Optional. Used to set blob tags in various blob operations.
+ :type blob_tags_string: str
+ :param seal_blob: Overrides the sealed state of the destination blob. Service version
+ 2019-12-12 and newer.
+ :type seal_blob: bool
+ :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy
+ is set to expire.
+ :type immutability_policy_expiry: ~datetime.datetime
+ :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+ :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+ :param legal_hold: Specifies whether a legal hold should be set on the blob.
+ :type legal_hold: bool
+ :param source_modified_access_conditions: Parameter group.
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + accept = "application/xml" + + # Construct URL + url = self.start_copy_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _source_if_tags is not None: + header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if seal_blob is not None: + header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + if legal_hold is not None: + header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def copy_from_url( + self, + 
+
+ async def copy_from_url(
+ self,
+ copy_source: str,
+ timeout: Optional[int] = None,
+ metadata: Optional[str] = None,
+ tier: Optional[Union[str, "_models.AccessTierOptional"]] = None,
+ request_id_parameter: Optional[str] = None,
+ source_content_md5: Optional[bytearray] = None,
+ blob_tags_string: Optional[str] = None,
+ immutability_policy_expiry: Optional[datetime.datetime] = None,
+ immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None,
+ legal_hold: Optional[bool] = None,
+ copy_source_authorization: Optional[str] = None,
+ source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ **kwargs: Any
+ ) -> None:
+ """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not
+ return a response until the copy is complete.
+
+ :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of
+ up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it
+ would appear in a request URI. The source blob must either be public or must be authenticated
+ via a shared access signature.
+ :type copy_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+ If no name-value pairs are specified, the operation will copy the metadata from the source blob
+ or file to the destination blob. If one or more name-value pairs are specified, the destination
+ blob is created with the specified metadata, and metadata is not copied from the source blob or
+ file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+ rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+ information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+ from the copy source.
+ :type source_content_md5: bytearray
+ :param blob_tags_string: Optional. Used to set blob tags in various blob operations.
+ :type blob_tags_string: str
+ :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy
+ is set to expire.
+ :type immutability_policy_expiry: ~datetime.datetime
+ :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+ :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+ :param legal_hold: Specifies whether a legal hold should be set on the blob.
+ :type legal_hold: bool
+ :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+ OAuth access token for the copy source.
+ :type copy_source_authorization: str
+ :param source_modified_access_conditions: Parameter group.
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + x_ms_requires_sync = "true" + accept = "application/xml" + + # Construct URL + url = self.copy_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _if_modified_since is not None: + 
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + if legal_hold is not None: + header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') + if copy_source_authorization is not None: + header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + 
response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + + if cls: + return cls(pipeline_response, None, response_headers) + + copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def abort_copy_from_url( + self, + copy_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a + destination blob with zero length and full metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy + Blob operation. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "copy" + copy_action_abort_constant = "abort" + accept = "application/xml" + + # Construct URL + url = self.abort_copy_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_tier( + self, + tier: Union[str, "_models.AccessTierRequired"], + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a + premium storage account and on a block blob in a blob storage account (locally redundant + storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not + update the blob's ETag. + + :param tier: Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierRequired + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived + blob. + :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "tier" + accept = "application/xml" + + # Construct URL + url = self.set_tier.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if response.status_code == 202: + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
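+        # Editorial note (hedged): Set Blob Tier answers 200 when the new tier takes effect
+        # immediately and 202 when the change is accepted but still pending (for example a
+        # rehydration out of Archive); both branches record the same request-tracking headers.
+        # A minimal caller-side sketch through the public aio wrapper (the method name
+        # `set_standard_blob_tier` is an assumption here, not confirmed by this diff):
+        #     await blob_client.set_standard_blob_tier("Cool")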
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def get_account_info( + self, + **kwargs: Any + ) -> None: + """Returns the sku name and account kind. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "account" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_account_info.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) + response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def query( + self, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + query_request: Optional["_models.QueryRequest"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> IO: + """The Query operation enables users to select/project on blob data by providing simple query + expressions. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. 
For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param query_request: the query request. + :type query_request: ~azure.storage.blob.models.QueryRequest + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "query" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.query.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + 
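+        # Editorial note (hedged): the three x-ms-encryption-* headers travel together; when a
+        # customer-provided key (CpkInfo) is supplied, the key, its SHA-256 hash, and the
+        # algorithm are all expected by the service, which is why they are unpacked as a group.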
header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if query_request is not None: + body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + 
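+        # Editorial note (hedged): the remaining headers echo standard blob properties on the
+        # filtered result; the payload itself is exposed below through stream_download rather
+        # than buffered in memory, since query results can be arbitrarily large.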
response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + 
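+        # Editorial note (hedged): this 206 (Partial Content) branch repeats the 200 header
+        # set and additionally surfaces x-ms-content-crc64, parsed just above, for integrity
+        # checking of the returned range.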
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def get_tags( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> "_models.BlobTags": + """The Get Tags operation enables users to get the tags associated with a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. 
+ :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BlobTags, or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlobTags + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "tags" + accept = "application/xml" + + # Construct URL + url = self.get_tags.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date')) + deserialized = self._deserialize('BlobTags', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def set_tags( + self, + timeout: Optional[int] = None, + version_id: Optional[str] = None, + transactional_content_md5: Optional[bytearray] = None, + transactional_content_crc64: Optional[bytearray] = None, + request_id_parameter: Optional[str] = None, + tags: Optional["_models.BlobTags"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Set Tags operation enables users to set tags on a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param tags: Blob tags. + :type tags: ~azure.storage.blob.models.BlobTags + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "tags" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_tags.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if tags is not None: + body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_block_blob_operations.py new file mode 100644 index 0000000..3eb1659 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_block_blob_operations.py @@ -0,0 +1,1138 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class BlockBlobOperations: + """BlockBlobOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def upload( + self, + content_length: int, + body: IO, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytearray] = None, + metadata: Optional[str] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Upload Block Blob operation updates the content of an existing block blob. 
Updating an + existing block blob overwrites any existing metadata on the blob. Partial updates are not + supported with Put Blob; the content of the existing blob is overwritten with the content of + the new blob. To perform a partial update of the content of a block blob, use the Put Block + List operation. + + :param content_length: The length of the request. + :type content_length: long + :param body: Initial data. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. + :type legal_hold: bool + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + blob_type = "BlockBlob" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.upload.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = 
self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + if legal_hold is not None: + header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def put_blob_from_url( + self, + content_length: int, + copy_source: str, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytearray] = None, + metadata: Optional[str] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + source_content_md5: Optional[bytearray] = None, + blob_tags_string: Optional[str] = None, + copy_source_blob_properties: Optional[bool] = None, + copy_source_authorization: Optional[str] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are + read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial + updates are not supported with Put Blob from URL; the content of an existing blob is + overwritten with the content of the new blob. To perform partial updates to a block blob’s + contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. + + :param content_length: The length of the request. + :type content_length: long + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. 
The source blob must either be public or must be authenticated + via a shared access signature. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param copy_source_blob_properties: Optional, default is true. Indicates if properties from + the source blob should be copied. + :type copy_source_blob_properties: bool + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. + :type copy_source_authorization: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. 
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + blob_type = "BlockBlob" + accept = "application/xml" + + # Construct URL + url = self.put_blob_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + 
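+        # Editorial note (hedged): unlike the asynchronous Copy Blob operation, Put Blob from
+        # URL completes the whole copy within this single request, so no x-ms-copy-id polling
+        # is involved; Content-Length is still sent next even though the request carries no body.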
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _source_if_tags is not None: + header_parameters['x-ms-source-if-tags'] = 
self._serialize.header("source_if_tags", _source_if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if copy_source_blob_properties is not None: + header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') + if copy_source_authorization is not None: + header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def stage_block( + self, + block_id: str, + content_length: int, + body: IO, + transactional_content_md5: Optional[bytearray] = None, + transactional_content_crc64: Optional[bytearray] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + **kwargs: Any + ) -> None: + """The Stage Block 
operation creates a new block to be committed as part of a blob. + + :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the + string must be less than or equal to 64 bytes in size. For a given blob, the length of the + value specified for the blockid parameter must be the same size for each block. + :type block_id: str + :param content_length: The length of the request. + :type content_length: long + :param body: Initial data. + :type body: IO + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "block" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.stage_block.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if 
transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def stage_block_from_url( + self, + block_id: str, + content_length: int, + source_url: str, + source_range: Optional[str] = None, + source_content_md5: Optional[bytearray] = None, + source_contentcrc64: Optional[bytearray] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + cpk_info: 
Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Stage Block operation creates a new block to be committed as part of a blob where the + contents are read from a URL. + + :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the + string must be less than or equal to 64 bytes in size. For a given blob, the length of the + value specified for the blockid parameter must be the same size for each block. + :type block_id: str + :param content_length: The length of the request. + :type content_length: long + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. + :type copy_source_authorization: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param source_modified_access_conditions: Parameter group. 
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "block" + accept = "application/xml" + + # Construct URL + url = self.stage_block_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", 
_encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if copy_source_authorization is not None: + header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def commit_block_list( + self, + blocks: "_models.BlockLookupList", + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytearray] = None, + transactional_content_crc64: Optional[bytearray] = None, + metadata: Optional[str] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: 
Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Commit Block List operation writes a blob by specifying the list of block IDs that make up + the blob. In order to be written as part of a blob, a block must have been successfully written + to the server in a prior Put Block operation. You can call Put Block List to update a blob by + uploading only those blocks that have changed, then committing the new and existing blocks + together. You can do this by specifying whether to commit a block from the committed block list + or from the uncommitted block list, or to commit the most recently uploaded version of the + block, whichever list it may belong to. + + :param blocks: + :type blocks: ~azure.storage.blob.models.BlockLookupList + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param tier: Optional. Indicates the tier to be set on the blob. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. + :type legal_hold: bool + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. 
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_cache_control = None + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "blocklist" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.commit_block_list.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + 
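+        # Each BlobHTTPHeaders field supplied by the caller is forwarded as its matching
+        # x-ms-blob-* header, so the blob assembled from the committed blocks carries
+        # those content properties.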
header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + if legal_hold is not None: + header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def get_block_list( + self, + snapshot: Optional[str] = None, + list_type: Union[str, "_models.BlockListType"] = "committed", + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> "_models.BlockList": + """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a + block blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param list_type: Specifies whether to return the list of committed blocks, the list of + uncommitted blocks, or both lists together. + :type list_type: str or ~azure.storage.blob.models.BlockListType + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BlockList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlockList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "blocklist" + accept = "application/xml" + + # Construct URL + url = self.get_block_list.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('BlockList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_container_operations.py new file mode 100644 index 0000000..17f58d5 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_container_operations.py @@ -0,0 +1,1648 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class ContainerOperations: + """ContainerOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + access: Optional[Union[str, "_models.PublicAccessType"]] = None, + request_id_parameter: Optional[str] = None, + container_cpk_scope_info: Optional["_models.ContainerCpkScopeInfo"] = None, + **kwargs: Any + ) -> None: + """creates a new container under the specified account. If the container with the same name + already exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. 
If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param access: Specifies whether data in the container may be accessed publicly and the level + of access. + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param container_cpk_scope_info: Parameter group. + :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _default_encryption_scope = None + _prevent_encryption_scope_override = None + if container_cpk_scope_info is not None: + _default_encryption_scope = container_cpk_scope_info.default_encryption_scope + _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override + restype = "container" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if access is not None: + header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _default_encryption_scope is not None: + header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str') + if _prevent_encryption_scope_override is not None: + header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + 
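+            # Status codes other than 201 Created are routed through error_map, turning
+            # 401/404/409 into typed azure.core exceptions; anything else raises
+            # HttpResponseError carrying the deserialized StorageError model.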
map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{containerName}'} # type: ignore + + async def get_properties( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """returns all user-defined metadata and system properties for the specified container. The data + returned does not include the container's list of blobs. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "container" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) + response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) + 
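+        # The remaining headers expose container-level policy state: legal hold, the
+        # default encryption scope (and whether overriding it is denied), and whether
+        # immutable storage with versioning is enabled.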
response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) + response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) + response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) + response_headers['x-ms-immutable-storage-with-versioning-enabled']=self._deserialize('bool', response.headers.get('x-ms-immutable-storage-with-versioning-enabled')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{containerName}'} # type: ignore + + async def delete( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """operation marks the specified container for deletion. The container and any blobs contained + within it are later deleted during garbage collection. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + restype = "container" + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not 
None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{containerName}'} # type: ignore + + async def set_metadata( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """operation sets one or more user-defined name-value pairs for the specified container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + restype = "container" + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{containerName}'} # type: ignore + + async def get_access_policy( + self, + timeout: 
Optional[int] = None,
+ request_id_parameter: Optional[str] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ **kwargs: Any
+ ) -> List["_models.SignedIdentifier"]:
+ """Gets the permissions for the specified container. The permissions indicate whether container
+ data may be accessed publicly.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: list of SignedIdentifier, or the result of cls(response)
+ :rtype: list[~azure.storage.blob.models.SignedIdentifier]
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ restype = "container"
+ comp = "acl"
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.get_access_policy.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+
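
In application code, the two access-policy operations in this file are normally reached through the public ``azure.storage.blob.aio`` layer rather than called directly. A minimal sketch of setting and then reading back a container ACL; the connection-string environment variable and container name are hypothetical::

    import asyncio
    import datetime
    import os

    from azure.storage.blob import AccessPolicy, ContainerSasPermissions
    from azure.storage.blob.aio import ContainerClient

    async def main():
        # Hypothetical connection string and container name.
        container = ContainerClient.from_connection_string(
            os.environ["AZURE_STORAGE_CONNECTION_STRING"], container_name="my-container")
        async with container:
            # set_container_access_policy drives the comp=acl PUT (set_access_policy below).
            policy = AccessPolicy(
                permission=ContainerSasPermissions(read=True),
                expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=1))
            await container.set_container_access_policy(signed_identifiers={"read-only": policy})
            # get_container_access_policy drives the comp=acl GET shown here.
            acl = await container.get_container_access_policy()
            print(acl["public_access"], acl["signed_identifiers"])

    asyncio.run(main())
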
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ deserialized = self._deserialize('[SignedIdentifier]', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore
+
+ async def set_access_policy(
+ self,
+ timeout: Optional[int] = None,
+ access: Optional[Union[str, "_models.PublicAccessType"]] = None,
+ request_id_parameter: Optional[str] = None,
+ container_acl: Optional[List["_models.SignedIdentifier"]] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs: Any
+ ) -> None:
+ """Sets the permissions for the specified container. The permissions indicate whether blobs in a
+ container may be accessed publicly.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param access: Specifies whether data in the container may be accessed publicly and the level
+ of access.
+ :type access: str or ~azure.storage.blob.models.PublicAccessType
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param container_acl: The access control lists (ACLs) for the container.
+ :type container_acl: list[~azure.storage.blob.models.SignedIdentifier]
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + restype = "container" + comp = "acl" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_access_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if access is not None: + header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}} + if container_acl is not None: + body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise 
HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore + + async def restore( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + deleted_container_name: Optional[str] = None, + deleted_container_version: Optional[str] = None, + **kwargs: Any + ) -> None: + """Restores a previously-deleted container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of + the deleted container to restore. + :type deleted_container_name: str + :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the + version of the deleted container to restore. 
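
The restore operation being documented here is surfaced publicly as ``BlobServiceClient.undelete_container``. A minimal sketch of recovering every soft-deleted container on an account, assuming container soft delete is enabled; the connection-string variable is hypothetical::

    import asyncio
    import os

    from azure.storage.blob.aio import BlobServiceClient

    async def main():
        service = BlobServiceClient.from_connection_string(
            os.environ["AZURE_STORAGE_CONNECTION_STRING"])  # hypothetical
        async with service:
            # Deleted containers carry the name/version pair that the
            # x-ms-deleted-container-name/-version headers above expect.
            async for container in service.list_containers(include_deleted=True):
                if container.deleted:
                    await service.undelete_container(container.name, container.version)

    asyncio.run(main())
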
+ :type deleted_container_version: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "undelete" + accept = "application/xml" + + # Construct URL + url = self.restore.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if deleted_container_name is not None: + header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') + if deleted_container_version is not None: + header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + restore.metadata = {'url': '/{containerName}'} # type: ignore + + async def rename( + self, + source_container_name: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + source_lease_id: Optional[str] = None, + **kwargs: Any + ) -> None: + """Renames an existing container. + + :param source_container_name: Required. Specifies the name of the container to rename. + :type source_container_name: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param source_lease_id: A lease ID for the source path. If specified, the source path must have + an active lease and the lease ID must match. + :type source_lease_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "rename" + accept = "application/xml" + + # Construct URL + url = self.rename.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-source-container-name'] = self._serialize.header("source_container_name", source_container_name, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {'url': '/{containerName}'} # type: ignore + + async def submit_batch( + self, + content_length: int, + multipart_content_type: str, + body: IO, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> IO: + """The Batch operation allows multiple API calls to be embedded into a single HTTP request. 
+
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param multipart_content_type: Required. The value of this header must be multipart/mixed with
+ a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:`<GUID>`.
+ :type multipart_content_type: str
+ :param body: Initial data.
+ :type body: IO
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: IO, or the result of cls(response)
+ :rtype: IO
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[IO]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ restype = "container"
+ comp = "batch"
+ content_type = kwargs.pop("content_type", "application/xml")
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.submit_batch.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ body_content_kwargs = {} # type: Dict[str, Any]
+ body_content = self._serialize.body(body, 'IO', is_xml=True)
+ body_content_kwargs['content'] = body_content
+ request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+
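
The public ``ContainerClient.delete_blobs`` helper is the usual way to exercise this batch endpoint: it serializes each sub-request into the multipart/mixed body described above and sends them as one HTTP request. A minimal sketch with hypothetical names::

    import asyncio
    import os

    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient.from_connection_string(
            os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # hypothetical
            container_name="my-container")                  # hypothetical
        async with container:
            # One HTTP request carries all three delete sub-requests.
            await container.delete_blobs("log-0.txt", "log-1.txt", "log-2.txt")

    asyncio.run(main())
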
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + submit_batch.metadata = {'url': '/{containerName}'} # type: ignore + + async def acquire_lease( + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "acquire" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + 
header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore + + async def release_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "release" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + 
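
The lease operations in this group are normally driven through the ``BlobLeaseClient`` returned by the public ``acquire_lease`` helper rather than called directly. A minimal acquire/release round trip; the names are hypothetical::

    import asyncio
    import os

    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient.from_connection_string(
            os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # hypothetical
            container_name="my-container")                  # hypothetical
        async with container:
            # Maps to x-ms-lease-action: acquire with a 15-second duration.
            lease = await container.acquire_lease(lease_duration=15)
            try:
                print("acquired lease", lease.id)
            finally:
                # Maps to x-ms-lease-action: release.
                await lease.release()

    asyncio.run(main())
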
release_lease.metadata = {'url': '/{containerName}'} # type: ignore + + async def renew_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "renew" + accept = "application/xml" + + # Construct URL + url = self.renew_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {'url': '/{containerName}'} # type: ignore + + async def break_lease( + self, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease + breaks immediately. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "break" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + 
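
Breaking a lease is exposed publicly as ``BlobLeaseClient.break_lease``; the integer it returns is the ``x-ms-lease-time`` header deserialized above, i.e. the seconds remaining before the lease actually ends. A sketch with hypothetical names::

    import asyncio
    import os

    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient.from_connection_string(
            os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # hypothetical
            container_name="my-container")                  # hypothetical
        async with container:
            lease = await container.acquire_lease(lease_duration=60)
            # lease_break_period=0 asks the service to end the lease immediately.
            remaining = await lease.break_lease(lease_break_period=0)
            print("lease ends in", remaining, "seconds")

    asyncio.run(main())
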
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {'url': '/{containerName}'} # type: ignore + + async def change_lease( + self, + lease_id: str, + proposed_lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "change" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ change_lease.metadata = {'url': '/{containerName}'} # type: ignore
+
+ async def list_blob_flat_segment(
+ self,
+ prefix: Optional[str] = None,
+ marker: Optional[str] = None,
+ maxresults: Optional[int] = None,
+ include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None,
+ timeout: Optional[int] = None,
+ request_id_parameter: Optional[str] = None,
+ **kwargs: Any
+ ) -> "_models.ListBlobsFlatSegmentResponse":
+ """[Update] The List Blobs operation returns a list of the blobs under the specified container.
+
+ :param prefix: Filters the results to return only blobs whose names begin with the specified
+ prefix.
+ :type prefix: str
+ :param marker: A string value that identifies the portion of the list of blobs to be
+ returned with the next listing operation. The operation returns the NextMarker value within the
+ response body if the listing operation did not return all blobs remaining to be listed
+ with the current page. The NextMarker value can be used as the value for the marker parameter
+ in a subsequent call to request the next page of list items. The marker value is opaque to the
+ client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of blobs to return. If the request does
+ not specify maxresults, or specifies a value greater than 5000, the server will return up to
+ 5000 items. Note that if the listing operation crosses a partition boundary, then the service
+ will return a continuation token for retrieving the remainder of the results. For this reason,
+ it is possible that the service will return fewer results than specified by maxresults, or than
+ the default of 5000.
+ :type maxresults: int
+ :param include: Include this parameter to specify one or more datasets to include in the
+ response.
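
Rather than managing ``marker``/``NextMarker`` by hand, callers typically use the public ``ContainerClient.list_blobs`` pager, which re-feeds the continuation token into this operation automatically. A sketch with hypothetical names::

    import asyncio
    import os

    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient.from_connection_string(
            os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # hypothetical
            container_name="my-container")                  # hypothetical
        async with container:
            # name_starts_with maps to prefix; include maps to the include
            # query parameter; paging continues across markers automatically.
            async for blob in container.list_blobs(name_starts_with="logs/",
                                                   include=["metadata"]):
                print(blob.name, blob.metadata)

    asyncio.run(main())
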
+ :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsFlatSegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_blob_flat_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date'))
+ deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore
+
+ async def list_blob_hierarchy_segment(
+ self,
+ delimiter: str,
+ prefix: Optional[str] = None,
+ marker: Optional[str] = None,
+ maxresults: Optional[int] = None,
+ include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None,
+ timeout: Optional[int] = None,
+ request_id_parameter: Optional[str] = None,
+ **kwargs: Any
+ ) -> "_models.ListBlobsHierarchySegmentResponse":
+ """[Update] The List Blobs operation returns a list of the blobs under the specified container.
+
+ :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+ element in the response body that acts as a placeholder for all blobs whose names begin with
+ the same substring up to the appearance of the delimiter character. The delimiter may be a
+ single character or a string.
+ :type delimiter: str
+ :param prefix: Filters the results to return only blobs whose names begin with the specified
+ prefix.
+ :type prefix: str
+ :param marker: A string value that identifies the portion of the list of blobs to be
+ returned with the next listing operation. The operation returns the NextMarker value within the
+ response body if the listing operation did not return all blobs remaining to be listed
+ with the current page. The NextMarker value can be used as the value for the marker parameter
+ in a subsequent call to request the next page of list items. The marker value is opaque to the
+ client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of blobs to return. If the request does
+ not specify maxresults, or specifies a value greater than 5000, the server will return up to
+ 5000 items. Note that if the listing operation crosses a partition boundary, then the service
+ will return a continuation token for retrieving the remainder of the results. For this reason,
+ it is possible that the service will return fewer results than specified by maxresults, or than
+ the default of 5000.
+ :type maxresults: int
+ :param include: Include this parameter to specify one or more datasets to include in the
+ response.
+ :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem]
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
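
The delimiter form of the listing backs the public ``ContainerClient.walk_blobs`` pager, which yields ``BlobPrefix`` placeholders for each virtual "directory" alongside the blobs at the current level. A sketch with hypothetical names::

    import asyncio
    import os

    from azure.storage.blob.aio import ContainerClient

    async def main():
        container = ContainerClient.from_connection_string(
            os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # hypothetical
            container_name="my-container")                  # hypothetical
        async with container:
            # BlobPrefix items mark sub-"directories" grouped by the delimiter;
            # BlobProperties items are the blobs at this level of the hierarchy.
            async for item in container.walk_blobs(delimiter="/"):
                print(type(item).__name__, item.name)

    asyncio.run(main())
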
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore + + async def get_account_info( + 
self, + **kwargs: Any + ) -> None: + """Returns the sku name and account kind. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "account" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_account_info.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) + response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_directory_operations.py new file mode 100644 index 0000000..12e49a1 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_directory_operations.py @@ -0,0 +1,742 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class DirectoryOperations: + """DirectoryOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + timeout: Optional[int] = None, + directory_properties: Optional[str] = None, + posix_permissions: Optional[str] = None, + posix_umask: Optional[str] = None, + request_id_parameter: Optional[str] = None, + directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Create a directory. By default, the destination is overwritten and if the destination already + exists and has a lease the lease is broken. This operation supports conditional HTTP requests. + For more information, see `Specifying Conditional Headers for Blob Service Operations + `_. + To fail if the destination already exists, use a conditional request with If-None-Match: "*". + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param directory_properties: Optional. User-defined properties to be stored with the file or + directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", + where each value is base64 encoded. + :type directory_properties: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask + restricts permission settings for file and directory, and will only be applied when default Acl + does not exist in parent directory. If the umask bit has set, it means that the corresponding + permission will be disabled. Otherwise the corresponding permission will be determined by the + permission. A 4-digit octal notation (e.g. 
0022) is supported here. If no umask was specified, + a default umask - 0027 will be used. + :type posix_umask: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param directory_http_headers: Parameter group. + :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cache_control = None + _content_type = None + _content_encoding = None + _content_language = None + _content_disposition = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + if directory_http_headers is not None: + _cache_control = directory_http_headers.cache_control + _content_type = directory_http_headers.content_type + _content_encoding = directory_http_headers.content_encoding + _content_language = directory_http_headers.content_language + _content_disposition = directory_http_headers.content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + resource = "directory" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("resource", resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + 
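+ # Each optional DirectoryHttpHeaders field unpacked earlier is forwarded as its
+ # own x-ms-content-* request header; fields left as None are simply omitted.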
header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def rename( + self, + rename_source: str, + timeout: Optional[int] = None, + marker: Optional[str] = None, + path_rename_mode: Optional[Union[str, "_models.PathRenameMode"]] = None, + directory_properties: Optional[str] = None, + posix_permissions: Optional[str] = None, + posix_umask: Optional[str] = None, + source_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + directory_http_headers: Optional["_models.DirectoryHttpHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, 
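+ # The *_access_conditions parameter groups above are flattened into their
+ # individual conditional headers (If-Match, x-ms-source-if-*, x-ms-lease-id)
+ # when the request is constructed below.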
+ **kwargs: Any
+ ) -> None:
+ """Rename a directory. By default, the destination is overwritten and if the destination already
+ exists and has a lease the lease is broken. This operation supports conditional HTTP requests.
+ For more information, see `Specifying Conditional Headers for Blob Service Operations
+ `_.
+ To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+
+ :param rename_source: The file or directory to be renamed. The value must have the following
+ format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties will
+ overwrite the existing properties; otherwise, the existing properties will be preserved.
+ :type rename_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param marker: When renaming a directory, the number of paths that are renamed with each
+ invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation
+ token is returned in this response header. When a continuation token is returned in the
+ response, it must be specified in a subsequent invocation of the rename operation to continue
+ renaming the directory.
+ :type marker: str
+ :param path_rename_mode: Determines the behavior of the rename operation.
+ :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode
+ :param directory_properties: Optional. User-defined properties to be stored with the file or
+ directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...",
+ where each value is base64 encoded.
+ :type directory_properties: str
+ :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+ account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+ Each class may be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ :type posix_permissions: str
+ :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask
+ restricts permission settings for file and directory, and will only be applied when default Acl
+ does not exist in parent directory. If the umask bit has set, it means that the corresponding
+ permission will be disabled. Otherwise the corresponding permission will be determined by the
+ permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified,
+ a default umask - 0027 will be used.
+ :type posix_umask: str
+ :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+ an active lease and the lease ID must match.
+ :type source_lease_id: str
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param directory_http_headers: Parameter group.
+ :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Parameter group.
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cache_control = None + _content_type = None + _content_encoding = None + _content_language = None + _content_disposition = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if directory_http_headers is not None: + _cache_control = directory_http_headers.cache_control + _content_type = directory_http_headers.content_type + _content_encoding = directory_http_headers.content_encoding + _content_language = directory_http_headers.content_language + _content_disposition = directory_http_headers.content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + accept = "application/xml" + + # Construct URL + url = self.rename.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] 
= self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + 
+ response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
+
+ async def delete(
+ self,
+ recursive_directory_delete: bool,
+ timeout: Optional[int] = None,
+ marker: Optional[str] = None,
+ request_id_parameter: Optional[str] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs: Any
+ ) -> None:
+ """Deletes the directory.
+
+ :param recursive_directory_delete: If "true", all paths beneath the directory will be deleted.
+ If "false" and the directory is non-empty, an error occurs.
+ :type recursive_directory_delete: bool
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param marker: When deleting a directory, the number of paths that are deleted with each
+ invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation
+ token is returned in this response header. When a continuation token is returned in the
+ response, it must be specified in a subsequent invocation of the delete operation to continue
+ deleting the directory.
+ :type marker: str
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def set_access_control( + self, + timeout: Optional[int] = None, + owner: Optional[str] = None, + group: Optional[str] = None, + posix_permissions: Optional[str] = None, + posix_acl: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Set the owner, group, permissions, or access control list for a directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and directories. The value is a + comma-separated list of access control entries. Each access control entry (ACE) consists of a + scope, a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "setAccessControl" + accept = "application/xml" + + # Construct URL + url = self.set_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def get_access_control( + self, + timeout: Optional[int] = None, + upn: Optional[bool] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Get the owner, group, permissions, or access control list for a directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If + "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response + headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If + "false", the values will be returned as Azure Active Directory Object IDs. The default value is + false. + :type upn: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "getAccessControl" + accept = "application/xml" + + # Construct URL + url = self.get_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
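+ # get_access_control issues a HEAD request, so the owner, group, permissions
+ # and ACL are returned only as response headers (x-ms-owner, x-ms-group,
+ # x-ms-permissions, x-ms-acl); there is no response body to parse.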
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) + response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) + response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) + response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_page_blob_operations.py new file mode 100644 index 0000000..06f1755 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_page_blob_operations.py @@ -0,0 +1,1424 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class PageBlobOperations: + """PageBlobOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
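+
+ A minimal usage sketch (hypothetical wiring; in practice these operations are
+ reached through the generated AzureBlobStorage client's ``page_blob`` attribute
+ rather than constructed directly)::
+
+ client = AzureBlobStorage(url=account_url)  # credential/pipeline setup omitted
+ # Create an empty 1 MiB page blob, then write its first 512-byte page.
+ await client.page_blob.create(content_length=0, blob_content_length=1024 * 1024)
+ await client.page_blob.upload_pages(content_length=512, body=page_stream, range="bytes=0-511")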
+ """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + content_length: int, + blob_content_length: int, + timeout: Optional[int] = None, + tier: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] = None, + metadata: Optional[str] = None, + blob_sequence_number: Optional[int] = 0, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + blob_http_headers: Optional["_models.BlobHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Create operation creates a new page blob. + + :param content_length: The length of the request. + :type content_length: long + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. + :type blob_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param tier: Optional. Indicates the tier to be set on the page blob. + :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled + value that you can use to track requests. The value of the sequence number must be between 0 + and 2^63 - 1. + :type blob_sequence_number: long + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. + :type legal_hold: bool + :param blob_http_headers: Parameter group. 
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + blob_type = "PageBlob" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 
'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + if legal_hold is not None: + 
header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+ response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
+ response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore
+
+ async def upload_pages(
+ self,
+ content_length: int,
+ body: IO,
+ transactional_content_md5: Optional[bytearray] = None,
+ transactional_content_crc64: Optional[bytearray] = None,
+ timeout: Optional[int] = None,
+ range: Optional[str] = None,
+ request_id_parameter: Optional[str] = None,
+ lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None,
+ cpk_info: Optional["_models.CpkInfo"] = None,
+ cpk_scope_info: Optional["_models.CpkScopeInfo"] = None,
+ sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None,
+ modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None,
+ **kwargs: Any
+ ) -> None:
+ """The Upload Pages operation writes a range of pages to a page blob.
+
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param body: Initial data.
+ :type body: IO
+ :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+ by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+ validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param range: The range of bytes to be written as a page; the range must be aligned to
+ 512-byte page boundaries.
+ :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Parameter group. + :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + comp = "page" + page_write = "update" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.upload_pages.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-page-write'] = 
self._serialize.header("page_write", page_write, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, 
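+ # 401/404/409 are mapped onto typed azure-core exceptions via the error_map built above.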
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def clear_pages( + self, + content_length: int, + timeout: Optional[int] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Clear Pages operation clears a set of pages from a page blob. + + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Parameter group. + :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. 
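+ Groups the If-Modified-Since/If-Unmodified-Since, If-Match/If-None-Match and blob-tag conditions applied to this request.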
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + comp = "page" + page_write = "clear" + accept = "application/xml" + + # Construct URL + url = self.clear_pages.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if 
_encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def upload_pages_from_url( + self, + source_url: str, + source_range: str, + content_length: int, + range: str, + source_content_md5: Optional[bytearray] = 
None, + source_contentcrc64: Optional[bytearray] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + sequence_number_access_conditions: Optional["_models.SequenceNumberAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Upload Pages operation writes a range of pages to a page blob where the contents are read + from a URL. + + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param source_range: Bytes of source data in the specified range. The length of this range + should match the ContentLength header and x-ms-range/Range destination range header. + :type source_range: str + :param content_length: The length of the request. + :type content_length: long + :param range: The range of bytes to which the source range would be written. The range should + be 512 aligned and range-end is required. + :type range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. + :type copy_source_authorization: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param sequence_number_access_conditions: Parameter group. + :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. 
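+ These conditions are sent as ``x-ms-source-if-*`` headers and are evaluated against the copy source rather than the destination blob.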
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "page" + page_write = "update" + accept = "application/xml" + + # Construct URL + url = self.upload_pages_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = 
self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if copy_source_authorization is not None: + 
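+ # Forward the caller-supplied OAuth bearer token so the service can authorize the read from the copy source.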
header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def get_page_ranges( + self, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> "_models.PageList": + """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot + of a page blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
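+ When provided, the listing succeeds only while the blob still satisfies these ETag, modification-time and tag conditions.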
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "pagelist" + accept = "application/xml" + + # Construct URL + url = self.get_page_ranges.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('PageList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def get_page_ranges_diff( + self, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + prevsnapshot: Optional[str] = None, + prev_snapshot_url: Optional[str] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> "_models.PageList": + """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that + changed between the target blob and a previous snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a + DateTime value that specifies that the response will contain only pages that changed + between the target blob and the previous snapshot. Changed pages include both updated and cleared + pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is + the older of the two. Note that incremental snapshots are currently supported only for blobs + created on or after January 1, 2016. + :type prevsnapshot: str + :param prev_snapshot_url: Optional. This header is only supported in service versions + 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The + response will only contain pages that changed between the target blob and its previous + snapshot. + :type prev_snapshot_url: str + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group.
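+ These conditions are evaluated against the target blob rather than the snapshot being diffed.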
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "pagelist" + accept = "application/xml" + + # Construct URL + url = self.get_page_ranges_diff.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if prevsnapshot is not None: + query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if prev_snapshot_url is not None: + header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('PageList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def resize( + self, + blob_content_length: int, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + cpk_info: Optional["_models.CpkInfo"] = None, + cpk_scope_info: Optional["_models.CpkScopeInfo"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Resize the Blob. + + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. + :type blob_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. 
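+ Passing ``if_match`` with a known ETag makes the resize conditional, guarding against a concurrent writer.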
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.resize.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') 
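+ # x-ms-blob-content-length carries the new total size; pages added by growing the blob read back as zeros.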
+ header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + resize.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def update_sequence_number( + self, + sequence_number_action: Union[str, "_models.SequenceNumberActionType"], + timeout: Optional[int] = None, + blob_sequence_number: Optional[int] = 0, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Update the sequence number of the blob. + + :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the + request. This property applies to page blobs only. This property indicates how the service + should modify the blob's sequence number. + :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled + value that you can use to track requests. The value of the sequence number must be between 0 + and 2^63 - 1. + :type blob_sequence_number: long + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
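+ Combine with ``if_match`` or ``if_tags`` to update the sequence number only if the blob is unchanged.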
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.update_sequence_number.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + async def copy_incremental( + self, + copy_source: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Copy Incremental operation copies a snapshot of the source page blob to a destination page + blob. The snapshot is copied such that only the differential changes since the previously + copied snapshot are transferred to the destination. The copied snapshots are complete copies of + the original snapshot and can be read or copied from as usual. This API is supported since REST + version 2016-05-31. + + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group.
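+ Incremental copy accepts no lease or customer-provided-key groups; only these time, ETag and tag conditions apply.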
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "incrementalcopy" + accept = "application/xml" + + # Construct URL + url = self.copy_incremental.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_service_operations.py new file mode 100644 index 0000000..8127498 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/aio/operations/_service_operations.py @@ -0,0 +1,698 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class ServiceOperations: + """ServiceOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def set_properties( + self, + storage_service_properties: "_models.StorageServiceProperties", + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """Sets properties for a storage account's Blob service endpoint, including properties for Storage + Analytics and CORS (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. + :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ restype = "service"
+ comp = "properties"
+ content_type = kwargs.pop("content_type", "application/xml")
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.set_properties.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ body_content_kwargs = {} # type: Dict[str, Any]
+ body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True)
+ body_content_kwargs['content'] = body_content
+ request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ set_properties.metadata = {'url': '/'} # type: ignore
+
+ async def get_properties(
+ self,
+ timeout: Optional[int] = None,
+ request_id_parameter: Optional[str] = None,
+ **kwargs: Any
+ ) -> "_models.StorageServiceProperties":
+ """Gets the properties of a storage account's Blob service, including properties for Storage
+ Analytics and CORS (Cross-Origin Resource Sharing) rules.
+
+ :param timeout: The timeout parameter is expressed in seconds.
For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceProperties, or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceProperties + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('StorageServiceProperties', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_properties.metadata = {'url': '/'} # type: ignore + + async def get_statistics( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> "_models.StorageServiceStats": + """Retrieves statistics related to replication for the Blob service. It is only available on the + secondary location endpoint when read-access geo-redundant replication is enabled for the + storage account. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceStats, or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceStats + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "stats" + accept = "application/xml" + + # Construct URL + url = self.get_statistics.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('StorageServiceStats', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_statistics.metadata = {'url': '/'} # type: ignore + + async def list_containers_segment( + self, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> "_models.ListContainersSegmentResponse": + """The List Containers Segment operation returns a list of the containers under the specified + account. 
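+
+ A minimal usage sketch (an editor's illustration, not part of the generated
+ docstring; it assumes an already-constructed async generated client bound to
+ the name ``client``, and ``"logs"`` is a placeholder prefix)::
+
+     segment = await client.service.list_containers_segment(prefix="logs", maxresults=100)
+     for container in segment.container_items:
+         print(container.name)
+     # pass ``segment.next_marker`` back in as ``marker`` to fetch the next page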
+ + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. + :type maxresults: int + :param include: Include this parameter to specify that the container's metadata be returned as + part of the response body. + :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListContainersSegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_containers_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not 
None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_containers_segment.metadata = {'url': '/'} # type: ignore + + async def get_user_delegation_key( + self, + key_info: "_models.KeyInfo", + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> "_models.UserDelegationKey": + """Retrieves a user delegation key for the Blob service. This is only a valid operation when using + bearer token authentication. + + :param key_info: + :type key_info: ~azure.storage.blob.models.KeyInfo + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UserDelegationKey, or the result of cls(response) + :rtype: ~azure.storage.blob.models.UserDelegationKey + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "userdelegationkey" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.get_user_delegation_key.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('UserDelegationKey', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_user_delegation_key.metadata = {'url': '/'} # type: ignore + + async def get_account_info( + self, + **kwargs: Any + ) -> None: + """Returns the sku name and account kind. 
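+
+ A minimal usage sketch (an editor's illustration; ``client`` is assumed to be
+ an authenticated async generated client). The response body is empty, so the
+ SKU and account kind come back only as response headers, which a ``cls``
+ callback can surface::
+
+     headers = await client.service.get_account_info(
+         cls=lambda pipeline_response, deserialized, response_headers: response_headers)
+     print(headers['x-ms-sku-name'], headers['x-ms-account-kind'])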
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "account" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_account_info.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) + response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) + response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {'url': '/'} # type: ignore + + async def submit_batch( + self, + content_length: int, + multipart_content_type: str, + body: IO, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> IO: + """The Batch operation allows multiple API calls to be embedded into a single HTTP request. + + :param content_length: The length of the request. + :type content_length: long + :param multipart_content_type: Required. The value of this header must be multipart/mixed with + a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. + :type multipart_content_type: str + :param body: Initial data. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "batch" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.submit_batch.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(body, 'IO', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + submit_batch.metadata = {'url': '/'} # type: ignore + + async def filter_blobs( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + where: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + **kwargs: Any + ) -> "_models.FilterBlobSegment": + """The Filter Blobs operation enables 
callers to list blobs across all containers whose tags match
+ a given search expression. Filter blobs searches across all containers within a storage
+ account but can be scoped within the expression to a single container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param where: Filters the results to return only blobs whose tags match the
+ specified expression.
+ :type where: str
+ :param marker: A string value that identifies the portion of the list of containers to be
+ returned with the next listing operation. The operation returns the NextMarker value within the
+ response body if the listing operation did not return all containers remaining to be listed
+ with the current page. The NextMarker value can be used as the value for the marker parameter
+ in a subsequent call to request the next page of list items. The marker value is opaque to the
+ client.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of containers to return. If the request does
+ not specify maxresults, or specifies a value greater than 5000, the server will return up to
+ 5000 items. Note that if the listing operation crosses a partition boundary, then the service
+ will return a continuation token for retrieving the remainder of the results. For this reason,
+ it is possible that the service will return fewer results than specified by maxresults, or than
+ the default of 5000.
+ :type maxresults: int
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: FilterBlobSegment, or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.FilterBlobSegment
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ comp = "blobs"
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.filter_blobs.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if where is not None:
+ query_parameters['where'] = self._serialize.query("where", where, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if maxresults is not None:
+ query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('FilterBlobSegment', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/__init__.py new file mode 100644 index 0000000..3efc69e --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/__init__.py @@ -0,0 +1,227 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AccessPolicy + from ._models_py3 import AppendPositionAccessConditions + from ._models_py3 import ArrowConfiguration + from ._models_py3 import ArrowField + from ._models_py3 import BlobFlatListSegment + from ._models_py3 import BlobHTTPHeaders + from ._models_py3 import BlobHierarchyListSegment + from ._models_py3 import BlobItemInternal + from ._models_py3 import BlobMetadata + from ._models_py3 import BlobPrefix + from ._models_py3 import BlobPropertiesInternal + from ._models_py3 import BlobTag + from ._models_py3 import BlobTags + from ._models_py3 import Block + from ._models_py3 import BlockList + from ._models_py3 import BlockLookupList + from ._models_py3 import ClearRange + from ._models_py3 import ContainerCpkScopeInfo + from ._models_py3 import ContainerItem + from ._models_py3 import ContainerProperties + from ._models_py3 import CorsRule + from ._models_py3 import CpkInfo + from ._models_py3 import CpkScopeInfo + from ._models_py3 import DataLakeStorageError + from ._models_py3 import DataLakeStorageErrorError + from ._models_py3 import DelimitedTextConfiguration + from ._models_py3 import DirectoryHttpHeaders + from ._models_py3 import FilterBlobItem + from ._models_py3 import FilterBlobSegment + from ._models_py3 import GeoReplication + from ._models_py3 import JsonTextConfiguration + from ._models_py3 import KeyInfo + from ._models_py3 import LeaseAccessConditions + from ._models_py3 import ListBlobsFlatSegmentResponse + from ._models_py3 import ListBlobsHierarchySegmentResponse + from ._models_py3 import ListContainersSegmentResponse + from ._models_py3 import Logging + from ._models_py3 import Metrics + from ._models_py3 import ModifiedAccessConditions + from ._models_py3 import PageList + from ._models_py3 import PageRange + from ._models_py3 import QueryFormat + from ._models_py3 import QueryRequest + from ._models_py3 import QuerySerialization + from ._models_py3 import RetentionPolicy + from ._models_py3 import SequenceNumberAccessConditions + from ._models_py3 import SignedIdentifier + from ._models_py3 import SourceModifiedAccessConditions + from ._models_py3 import StaticWebsite + from ._models_py3 import StorageError + from ._models_py3 import StorageServiceProperties + from ._models_py3 import StorageServiceStats + from ._models_py3 import UserDelegationKey +except (SyntaxError, ImportError): + from ._models import AccessPolicy # type: ignore + from ._models import AppendPositionAccessConditions # type: ignore + from ._models import ArrowConfiguration # type: ignore + from ._models import ArrowField # type: ignore + from ._models import BlobFlatListSegment # type: ignore + from ._models import BlobHTTPHeaders # type: ignore + from ._models import BlobHierarchyListSegment # type: ignore + from ._models import BlobItemInternal # type: ignore + from ._models import BlobMetadata # type: ignore + from ._models import BlobPrefix # type: ignore + from ._models import BlobPropertiesInternal # type: ignore + from ._models import BlobTag # type: ignore + from ._models import BlobTags # type: ignore + from ._models import Block # type: ignore + from ._models import BlockList # type: ignore + from ._models import BlockLookupList # type: ignore + from ._models import ClearRange # type: ignore + from ._models import ContainerCpkScopeInfo # type: ignore + from ._models import ContainerItem # type: ignore + from ._models import ContainerProperties # type: ignore + 
from ._models import CorsRule # type: ignore + from ._models import CpkInfo # type: ignore + from ._models import CpkScopeInfo # type: ignore + from ._models import DataLakeStorageError # type: ignore + from ._models import DataLakeStorageErrorError # type: ignore + from ._models import DelimitedTextConfiguration # type: ignore + from ._models import DirectoryHttpHeaders # type: ignore + from ._models import FilterBlobItem # type: ignore + from ._models import FilterBlobSegment # type: ignore + from ._models import GeoReplication # type: ignore + from ._models import JsonTextConfiguration # type: ignore + from ._models import KeyInfo # type: ignore + from ._models import LeaseAccessConditions # type: ignore + from ._models import ListBlobsFlatSegmentResponse # type: ignore + from ._models import ListBlobsHierarchySegmentResponse # type: ignore + from ._models import ListContainersSegmentResponse # type: ignore + from ._models import Logging # type: ignore + from ._models import Metrics # type: ignore + from ._models import ModifiedAccessConditions # type: ignore + from ._models import PageList # type: ignore + from ._models import PageRange # type: ignore + from ._models import QueryFormat # type: ignore + from ._models import QueryRequest # type: ignore + from ._models import QuerySerialization # type: ignore + from ._models import RetentionPolicy # type: ignore + from ._models import SequenceNumberAccessConditions # type: ignore + from ._models import SignedIdentifier # type: ignore + from ._models import SourceModifiedAccessConditions # type: ignore + from ._models import StaticWebsite # type: ignore + from ._models import StorageError # type: ignore + from ._models import StorageServiceProperties # type: ignore + from ._models import StorageServiceStats # type: ignore + from ._models import UserDelegationKey # type: ignore + +from ._azure_blob_storage_enums import ( + AccessTier, + AccessTierOptional, + AccessTierRequired, + AccountKind, + ArchiveStatus, + BlobExpiryOptions, + BlobImmutabilityPolicyMode, + BlobType, + BlockListType, + CopyStatusType, + DeleteSnapshotsOptionType, + EncryptionAlgorithmType, + GeoReplicationStatusType, + LeaseDurationType, + LeaseStateType, + LeaseStatusType, + ListBlobsIncludeItem, + ListContainersIncludeType, + PathRenameMode, + PremiumPageBlobAccessTier, + PublicAccessType, + QueryFormatType, + RehydratePriority, + SequenceNumberActionType, + SkuName, + StorageErrorCode, +) + +__all__ = [ + 'AccessPolicy', + 'AppendPositionAccessConditions', + 'ArrowConfiguration', + 'ArrowField', + 'BlobFlatListSegment', + 'BlobHTTPHeaders', + 'BlobHierarchyListSegment', + 'BlobItemInternal', + 'BlobMetadata', + 'BlobPrefix', + 'BlobPropertiesInternal', + 'BlobTag', + 'BlobTags', + 'Block', + 'BlockList', + 'BlockLookupList', + 'ClearRange', + 'ContainerCpkScopeInfo', + 'ContainerItem', + 'ContainerProperties', + 'CorsRule', + 'CpkInfo', + 'CpkScopeInfo', + 'DataLakeStorageError', + 'DataLakeStorageErrorError', + 'DelimitedTextConfiguration', + 'DirectoryHttpHeaders', + 'FilterBlobItem', + 'FilterBlobSegment', + 'GeoReplication', + 'JsonTextConfiguration', + 'KeyInfo', + 'LeaseAccessConditions', + 'ListBlobsFlatSegmentResponse', + 'ListBlobsHierarchySegmentResponse', + 'ListContainersSegmentResponse', + 'Logging', + 'Metrics', + 'ModifiedAccessConditions', + 'PageList', + 'PageRange', + 'QueryFormat', + 'QueryRequest', + 'QuerySerialization', + 'RetentionPolicy', + 'SequenceNumberAccessConditions', + 'SignedIdentifier', + 'SourceModifiedAccessConditions', + 
'StaticWebsite', + 'StorageError', + 'StorageServiceProperties', + 'StorageServiceStats', + 'UserDelegationKey', + 'AccessTier', + 'AccessTierOptional', + 'AccessTierRequired', + 'AccountKind', + 'ArchiveStatus', + 'BlobExpiryOptions', + 'BlobImmutabilityPolicyMode', + 'BlobType', + 'BlockListType', + 'CopyStatusType', + 'DeleteSnapshotsOptionType', + 'EncryptionAlgorithmType', + 'GeoReplicationStatusType', + 'LeaseDurationType', + 'LeaseStateType', + 'LeaseStatusType', + 'ListBlobsIncludeItem', + 'ListContainersIncludeType', + 'PathRenameMode', + 'PremiumPageBlobAccessTier', + 'PublicAccessType', + 'QueryFormatType', + 'RehydratePriority', + 'SequenceNumberActionType', + 'SkuName', + 'StorageErrorCode', +] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_azure_blob_storage_enums.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_azure_blob_storage_enums.py new file mode 100644 index 0000000..39eec6b --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_azure_blob_storage_enums.py @@ -0,0 +1,350 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum, EnumMeta +from six import with_metaclass + +class _CaseInsensitiveEnumMeta(EnumMeta): + def __getitem__(self, name): + return super().__getitem__(name.upper()) + + def __getattr__(cls, name): + """Return the enum member matching `name` + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. 
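+
+ For example (an editor's illustration), lookups are upper-cased before the
+ member map is consulted, so the following both resolve to the same member::
+
+     StorageErrorCode['blob_not_found']          # -> StorageErrorCode.BLOB_NOT_FOUND
+     getattr(StorageErrorCode, 'blob_not_found') # -> StorageErrorCode.BLOB_NOT_FOUND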
+ """ + try: + return cls._member_map_[name.upper()] + except KeyError: + raise AttributeError(name) + + +class AccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + HOT = "Hot" + COOL = "Cool" + ARCHIVE = "Archive" + +class AccessTierOptional(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + HOT = "Hot" + COOL = "Cool" + ARCHIVE = "Archive" + +class AccessTierRequired(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + HOT = "Hot" + COOL = "Cool" + ARCHIVE = "Archive" + +class AccountKind(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + STORAGE = "Storage" + BLOB_STORAGE = "BlobStorage" + STORAGE_V2 = "StorageV2" + FILE_STORAGE = "FileStorage" + BLOCK_BLOB_STORAGE = "BlockBlobStorage" + +class ArchiveStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + REHYDRATE_PENDING_TO_HOT = "rehydrate-pending-to-hot" + REHYDRATE_PENDING_TO_COOL = "rehydrate-pending-to-cool" + +class BlobExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + NEVER_EXPIRE = "NeverExpire" + RELATIVE_TO_CREATION = "RelativeToCreation" + RELATIVE_TO_NOW = "RelativeToNow" + ABSOLUTE = "Absolute" + +class BlobImmutabilityPolicyMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + UNLOCKED = "Unlocked" + LOCKED = "Locked" + MUTABLE = "Mutable" + +class BlobType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + BLOCK_BLOB = "BlockBlob" + PAGE_BLOB = "PageBlob" + APPEND_BLOB = "AppendBlob" + +class BlockListType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + COMMITTED = "committed" + UNCOMMITTED = "uncommitted" + ALL = "all" + +class CopyStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + PENDING = "pending" + SUCCESS = "success" + ABORTED = "aborted" + FAILED = "failed" + +class DeleteSnapshotsOptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + INCLUDE = "include" + ONLY = "only" + +class EncryptionAlgorithmType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + NONE = "None" + AES256 = "AES256" + +class GeoReplicationStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The status of the secondary location + """ + + LIVE = "live" + BOOTSTRAP = "bootstrap" + UNAVAILABLE = "unavailable" + +class LeaseDurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + INFINITE = "infinite" + FIXED = "fixed" + +class LeaseStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + AVAILABLE = "available" + LEASED = "leased" + EXPIRED = "expired" + BREAKING = "breaking" + BROKEN = "broken" + +class LeaseStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + LOCKED = "locked" + UNLOCKED = "unlocked" + +class ListBlobsIncludeItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + COPY = "copy" + DELETED = "deleted" + METADATA = "metadata" + SNAPSHOTS = "snapshots" + UNCOMMITTEDBLOBS = "uncommittedblobs" + VERSIONS = "versions" + TAGS = "tags" + IMMUTABILITYPOLICY = "immutabilitypolicy" + LEGALHOLD = "legalhold" + DELETEDWITHVERSIONS = "deletedwithversions" + +class 
ListContainersIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + METADATA = "metadata" + DELETED = "deleted" + +class PathRenameMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + LEGACY = "legacy" + POSIX = "posix" + +class PremiumPageBlobAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + +class PublicAccessType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + CONTAINER = "container" + BLOB = "blob" + +class QueryFormatType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The quick query format type. + """ + + DELIMITED = "delimited" + JSON = "json" + ARROW = "arrow" + PARQUET = "parquet" + +class RehydratePriority(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """If an object is in rehydrate pending state then this header is returned with priority of + rehydrate. Valid values are High and Standard. + """ + + HIGH = "High" + STANDARD = "Standard" + +class SequenceNumberActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + MAX = "max" + UPDATE = "update" + INCREMENT = "increment" + +class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + STANDARD_LRS = "Standard_LRS" + STANDARD_GRS = "Standard_GRS" + STANDARD_RAGRS = "Standard_RAGRS" + STANDARD_ZRS = "Standard_ZRS" + PREMIUM_LRS = "Premium_LRS" + +class StorageErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Error codes returned by the service + """ + + ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" + ACCOUNT_BEING_CREATED = "AccountBeingCreated" + ACCOUNT_IS_DISABLED = "AccountIsDisabled" + AUTHENTICATION_FAILED = "AuthenticationFailed" + AUTHORIZATION_FAILURE = "AuthorizationFailure" + CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" + CONDITION_NOT_MET = "ConditionNotMet" + EMPTY_METADATA_KEY = "EmptyMetadataKey" + INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" + INTERNAL_ERROR = "InternalError" + INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" + INVALID_HEADER_VALUE = "InvalidHeaderValue" + INVALID_HTTP_VERB = "InvalidHttpVerb" + INVALID_INPUT = "InvalidInput" + INVALID_MD5 = "InvalidMd5" + INVALID_METADATA = "InvalidMetadata" + INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" + INVALID_RANGE = "InvalidRange" + INVALID_RESOURCE_NAME = "InvalidResourceName" + INVALID_URI = "InvalidUri" + INVALID_XML_DOCUMENT = "InvalidXmlDocument" + INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" + MD5_MISMATCH = "Md5Mismatch" + METADATA_TOO_LARGE = "MetadataTooLarge" + MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" + MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" + MISSING_REQUIRED_HEADER = "MissingRequiredHeader" + MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" + MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" + OPERATION_TIMED_OUT = "OperationTimedOut" + OUT_OF_RANGE_INPUT = "OutOfRangeInput" + OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" + REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" + RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" + REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" + RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" + RESOURCE_NOT_FOUND = "ResourceNotFound" + SERVER_BUSY = "ServerBusy" + UNSUPPORTED_HEADER = "UnsupportedHeader" + UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" + 
UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" + UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" + APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet" + BLOB_ALREADY_EXISTS = "BlobAlreadyExists" + BLOB_IMMUTABLE_DUE_TO_POLICY = "BlobImmutableDueToPolicy" + BLOB_NOT_FOUND = "BlobNotFound" + BLOB_OVERWRITTEN = "BlobOverwritten" + BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength" + BLOB_USES_CUSTOMER_SPECIFIED_ENCRYPTION = "BlobUsesCustomerSpecifiedEncryption" + BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit" + BLOCK_LIST_TOO_LONG = "BlockListTooLong" + CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier" + CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource" + CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists" + CONTAINER_BEING_DELETED = "ContainerBeingDeleted" + CONTAINER_DISABLED = "ContainerDisabled" + CONTAINER_NOT_FOUND = "ContainerNotFound" + CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit" + COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported" + COPY_ID_MISMATCH = "CopyIdMismatch" + FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" + INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch" + INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot" + INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired" + INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock" + INVALID_BLOB_TIER = "InvalidBlobTier" + INVALID_BLOB_TYPE = "InvalidBlobType" + INVALID_BLOCK_ID = "InvalidBlockId" + INVALID_BLOCK_LIST = "InvalidBlockList" + INVALID_OPERATION = "InvalidOperation" + INVALID_PAGE_RANGE = "InvalidPageRange" + INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType" + INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl" + INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation" + LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent" + LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken" + LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation" + LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation" + LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation" + LEASE_ID_MISSING = "LeaseIdMissing" + LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired" + LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged" + LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed" + LEASE_LOST = "LeaseLost" + LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation" + LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation" + LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation" + MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet" + NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation" + NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation" + OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob" + PENDING_COPY_OPERATION = "PendingCopyOperation" + PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer" + PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound" + PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported" + SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet" + SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge" + 
SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded" + SNAPHOT_OPERATION_RATE_EXCEEDED = "SnaphotOperationRateExceeded" + SNAPSHOTS_PRESENT = "SnapshotsPresent" + SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet" + SYSTEM_IN_USE = "SystemInUse" + TARGET_CONDITION_NOT_MET = "TargetConditionNotMet" + UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite" + BLOB_BEING_REHYDRATED = "BlobBeingRehydrated" + BLOB_ARCHIVED = "BlobArchived" + BLOB_NOT_ARCHIVED = "BlobNotArchived" + AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" + AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch" + AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" + AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" + AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models.py new file mode 100644 index 0000000..b8e178d --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models.py @@ -0,0 +1,2055 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + + +class AccessPolicy(msrest.serialization.Model): + """An Access policy. + + :param start: the date-time the policy is active. + :type start: str + :param expiry: the date-time the policy expires. + :type expiry: str + :param permission: the permissions for the acl policy. + :type permission: str + """ + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str'}, + 'expiry': {'key': 'Expiry', 'type': 'str'}, + 'permission': {'key': 'Permission', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(AccessPolicy, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.expiry = kwargs.get('expiry', None) + self.permission = kwargs.get('permission', None) + + +class AppendPositionAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param max_size: Optional conditional header. The max length in bytes permitted for the append + blob. If the Append Block operation would cause the blob to exceed that limit or if the blob + size is already greater than the value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :type max_size: long + :param append_position: Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will succeed only if the append + position is equal to this number. If it is not, the request will fail with the + AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). 
+ :type append_position: long + """ + + _attribute_map = { + 'max_size': {'key': 'maxSize', 'type': 'long'}, + 'append_position': {'key': 'appendPosition', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(AppendPositionAccessConditions, self).__init__(**kwargs) + self.max_size = kwargs.get('max_size', None) + self.append_position = kwargs.get('append_position', None) + + +class ArrowConfiguration(msrest.serialization.Model): + """arrow configuration. + + All required parameters must be populated in order to send to Azure. + + :param schema: Required. + :type schema: list[~azure.storage.blob.models.ArrowField] + """ + + _validation = { + 'schema': {'required': True}, + } + + _attribute_map = { + 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, + } + _xml_map = { + 'name': 'ArrowConfiguration' + } + + def __init__( + self, + **kwargs + ): + super(ArrowConfiguration, self).__init__(**kwargs) + self.schema = kwargs['schema'] + + +class ArrowField(msrest.serialization.Model): + """field of an arrow schema. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. + :type type: str + :param name: + :type name: str + :param precision: + :type precision: int + :param scale: + :type scale: int + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'precision': {'key': 'Precision', 'type': 'int'}, + 'scale': {'key': 'Scale', 'type': 'int'}, + } + _xml_map = { + 'name': 'Field' + } + + def __init__( + self, + **kwargs + ): + super(ArrowField, self).__init__(**kwargs) + self.type = kwargs['type'] + self.name = kwargs.get('name', None) + self.precision = kwargs.get('precision', None) + self.scale = kwargs.get('scale', None) + + +class BlobFlatListSegment(msrest.serialization.Model): + """BlobFlatListSegment. + + All required parameters must be populated in order to send to Azure. + + :param blob_items: Required. + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__( + self, + **kwargs + ): + super(BlobFlatListSegment, self).__init__(**kwargs) + self.blob_items = kwargs['blob_items'] + + +class BlobHierarchyListSegment(msrest.serialization.Model): + """BlobHierarchyListSegment. + + All required parameters must be populated in order to send to Azure. + + :param blob_prefixes: + :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] + :param blob_items: Required. + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__( + self, + **kwargs + ): + super(BlobHierarchyListSegment, self).__init__(**kwargs) + self.blob_prefixes = kwargs.get('blob_prefixes', None) + self.blob_items = kwargs['blob_items'] + + +class BlobHTTPHeaders(msrest.serialization.Model): + """Parameter group. + + :param blob_cache_control: Optional. Sets the blob's cache control. 
If specified, this property
+ is stored with the blob and returned with a read request.
+ :type blob_cache_control: str
+ :param blob_content_type: Optional. Sets the blob's content type. If specified, this property
+ is stored with the blob and returned with a read request.
+ :type blob_content_type: str
+ :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not
+ validated, as the hashes for the individual blocks were validated when each was uploaded.
+ :type blob_content_md5: bytearray
+ :param blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this
+ property is stored with the blob and returned with a read request.
+ :type blob_content_encoding: str
+ :param blob_content_language: Optional. Sets the blob's content language. If specified, this
+ property is stored with the blob and returned with a read request.
+ :type blob_content_language: str
+ :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header.
+ :type blob_content_disposition: str
+ """
+
+ _attribute_map = {
+ 'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'},
+ 'blob_content_type': {'key': 'blobContentType', 'type': 'str'},
+ 'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'},
+ 'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'},
+ 'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'},
+ 'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(BlobHTTPHeaders, self).__init__(**kwargs)
+ self.blob_cache_control = kwargs.get('blob_cache_control', None)
+ self.blob_content_type = kwargs.get('blob_content_type', None)
+ self.blob_content_md5 = kwargs.get('blob_content_md5', None)
+ self.blob_content_encoding = kwargs.get('blob_content_encoding', None)
+ self.blob_content_language = kwargs.get('blob_content_language', None)
+ self.blob_content_disposition = kwargs.get('blob_content_disposition', None)
+
+
+class BlobItemInternal(msrest.serialization.Model):
+ """An Azure Storage blob.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ :param deleted: Required.
+ :type deleted: bool
+ :param snapshot: Required.
+ :type snapshot: str
+ :param version_id:
+ :type version_id: str
+ :param is_current_version:
+ :type is_current_version: bool
+ :param properties: Required. Properties of a blob.
+ :type properties: ~azure.storage.blob.models.BlobPropertiesInternal
+ :param metadata:
+ :type metadata: ~azure.storage.blob.models.BlobMetadata
+ :param blob_tags: Blob tags.
+ :type blob_tags: ~azure.storage.blob.models.BlobTags
+ :param object_replication_metadata: Dictionary of :code:`<string>`.
+ :type object_replication_metadata: dict[str, str] + :param has_versions_only: + :type has_versions_only: bool + """ + + _validation = { + 'name': {'required': True}, + 'deleted': {'required': True}, + 'snapshot': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'deleted': {'key': 'Deleted', 'type': 'bool'}, + 'snapshot': {'key': 'Snapshot', 'type': 'str'}, + 'version_id': {'key': 'VersionId', 'type': 'str'}, + 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, + 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, + 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'}, + 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'}, + 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'}, + 'has_versions_only': {'key': 'HasVersionsOnly', 'type': 'bool'}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__( + self, + **kwargs + ): + super(BlobItemInternal, self).__init__(**kwargs) + self.name = kwargs['name'] + self.deleted = kwargs['deleted'] + self.snapshot = kwargs['snapshot'] + self.version_id = kwargs.get('version_id', None) + self.is_current_version = kwargs.get('is_current_version', None) + self.properties = kwargs['properties'] + self.metadata = kwargs.get('metadata', None) + self.blob_tags = kwargs.get('blob_tags', None) + self.object_replication_metadata = kwargs.get('object_replication_metadata', None) + self.has_versions_only = kwargs.get('has_versions_only', None) + + +class BlobMetadata(msrest.serialization.Model): + """BlobMetadata. + + :param additional_properties: Unmatched properties from the message are deserialized to this + collection. + :type additional_properties: dict[str, str] + :param encrypted: + :type encrypted: str + """ + + _attribute_map = { + 'additional_properties': {'key': '', 'type': '{str}'}, + 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}}, + } + _xml_map = { + 'name': 'Metadata' + } + + def __init__( + self, + **kwargs + ): + super(BlobMetadata, self).__init__(**kwargs) + self.additional_properties = kwargs.get('additional_properties', None) + self.encrypted = kwargs.get('encrypted', None) + + +class BlobPrefix(msrest.serialization.Model): + """BlobPrefix. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(BlobPrefix, self).__init__(**kwargs) + self.name = kwargs['name'] + + +class BlobPropertiesInternal(msrest.serialization.Model): + """Properties of a blob. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: + :type creation_time: ~datetime.datetime + :param last_modified: Required. + :type last_modified: ~datetime.datetime + :param etag: Required. + :type etag: str + :param content_length: Size in bytes. + :type content_length: long + :param content_type: + :type content_type: str + :param content_encoding: + :type content_encoding: str + :param content_language: + :type content_language: str + :param content_md5: + :type content_md5: bytearray + :param content_disposition: + :type content_disposition: str + :param cache_control: + :type cache_control: str + :param blob_sequence_number: + :type blob_sequence_number: long + :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob". 
+ :type blob_type: str or ~azure.storage.blob.models.BlobType + :param lease_status: Possible values include: "locked", "unlocked". + :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :param lease_state: Possible values include: "available", "leased", "expired", "breaking", + "broken". + :type lease_state: str or ~azure.storage.blob.models.LeaseStateType + :param lease_duration: Possible values include: "infinite", "fixed". + :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :param copy_id: + :type copy_id: str + :param copy_status: Possible values include: "pending", "success", "aborted", "failed". + :type copy_status: str or ~azure.storage.blob.models.CopyStatusType + :param copy_source: + :type copy_source: str + :param copy_progress: + :type copy_progress: str + :param copy_completion_time: + :type copy_completion_time: ~datetime.datetime + :param copy_status_description: + :type copy_status_description: str + :param server_encrypted: + :type server_encrypted: bool + :param incremental_copy: + :type incremental_copy: bool + :param destination_snapshot: + :type destination_snapshot: str + :param deleted_time: + :type deleted_time: ~datetime.datetime + :param remaining_retention_days: + :type remaining_retention_days: int + :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40", + "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive". + :type access_tier: str or ~azure.storage.blob.models.AccessTier + :param access_tier_inferred: + :type access_tier_inferred: bool + :param archive_status: Possible values include: "rehydrate-pending-to-hot", + "rehydrate-pending-to-cool". + :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus + :param customer_provided_key_sha256: + :type customer_provided_key_sha256: str + :param encryption_scope: The name of the encryption scope under which the blob is encrypted. + :type encryption_scope: str + :param access_tier_change_time: + :type access_tier_change_time: ~datetime.datetime + :param tag_count: + :type tag_count: int + :param expires_on: + :type expires_on: ~datetime.datetime + :param is_sealed: + :type is_sealed: bool + :param rehydrate_priority: If an object is in rehydrate pending state then this header is + returned with priority of rehydrate. Valid values are High and Standard. Possible values + include: "High", "Standard". + :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param last_accessed_on: + :type last_accessed_on: ~datetime.datetime + :param immutability_policy_expires_on: + :type immutability_policy_expires_on: ~datetime.datetime + :param immutability_policy_mode: Possible values include: "Unlocked", "Locked", "Mutable". 
+ :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: + :type legal_hold: bool + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + 'content_length': {'key': 'Content-Length', 'type': 'long'}, + 'content_type': {'key': 'Content-Type', 'type': 'str'}, + 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, + 'content_language': {'key': 'Content-Language', 'type': 'str'}, + 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, + 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, + 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, + 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, + 'blob_type': {'key': 'BlobType', 'type': 'str'}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, + 'lease_state': {'key': 'LeaseState', 'type': 'str'}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, + 'copy_id': {'key': 'CopyId', 'type': 'str'}, + 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, + 'copy_source': {'key': 'CopySource', 'type': 'str'}, + 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, + 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, + 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, + 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, + 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, + 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, + 'access_tier': {'key': 'AccessTier', 'type': 'str'}, + 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, + 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, + 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, + 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, + 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, + 'tag_count': {'key': 'TagCount', 'type': 'int'}, + 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, + 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, + 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, + 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, + 'immutability_policy_expires_on': {'key': 'ImmutabilityPolicyUntilDate', 'type': 'rfc-1123'}, + 'immutability_policy_mode': {'key': 'ImmutabilityPolicyMode', 'type': 'str'}, + 'legal_hold': {'key': 'LegalHold', 'type': 'bool'}, + } + _xml_map = { + 'name': 'Properties' + } + + def __init__( + self, + **kwargs + ): + super(BlobPropertiesInternal, self).__init__(**kwargs) + self.creation_time = kwargs.get('creation_time', None) + self.last_modified = kwargs['last_modified'] + self.etag = kwargs['etag'] + self.content_length = kwargs.get('content_length', None) + self.content_type = kwargs.get('content_type', None) + self.content_encoding = kwargs.get('content_encoding', None) + self.content_language = kwargs.get('content_language', None) + self.content_md5 = kwargs.get('content_md5', None) + self.content_disposition = kwargs.get('content_disposition', None) + self.cache_control = kwargs.get('cache_control', None) + 
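# The remaining listing properties are optional and default to None when the
+ # service omits them; blob_sequence_number, for example, applies only to page
+ # blobs. +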
self.blob_sequence_number = kwargs.get('blob_sequence_number', None) + self.blob_type = kwargs.get('blob_type', None) + self.lease_status = kwargs.get('lease_status', None) + self.lease_state = kwargs.get('lease_state', None) + self.lease_duration = kwargs.get('lease_duration', None) + self.copy_id = kwargs.get('copy_id', None) + self.copy_status = kwargs.get('copy_status', None) + self.copy_source = kwargs.get('copy_source', None) + self.copy_progress = kwargs.get('copy_progress', None) + self.copy_completion_time = kwargs.get('copy_completion_time', None) + self.copy_status_description = kwargs.get('copy_status_description', None) + self.server_encrypted = kwargs.get('server_encrypted', None) + self.incremental_copy = kwargs.get('incremental_copy', None) + self.destination_snapshot = kwargs.get('destination_snapshot', None) + self.deleted_time = kwargs.get('deleted_time', None) + self.remaining_retention_days = kwargs.get('remaining_retention_days', None) + self.access_tier = kwargs.get('access_tier', None) + self.access_tier_inferred = kwargs.get('access_tier_inferred', None) + self.archive_status = kwargs.get('archive_status', None) + self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) + self.encryption_scope = kwargs.get('encryption_scope', None) + self.access_tier_change_time = kwargs.get('access_tier_change_time', None) + self.tag_count = kwargs.get('tag_count', None) + self.expires_on = kwargs.get('expires_on', None) + self.is_sealed = kwargs.get('is_sealed', None) + self.rehydrate_priority = kwargs.get('rehydrate_priority', None) + self.last_accessed_on = kwargs.get('last_accessed_on', None) + self.immutability_policy_expires_on = kwargs.get('immutability_policy_expires_on', None) + self.immutability_policy_mode = kwargs.get('immutability_policy_mode', None) + self.legal_hold = kwargs.get('legal_hold', None) + + +class BlobTag(msrest.serialization.Model): + """BlobTag. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. + :type key: str + :param value: Required. + :type value: str + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'str'}, + } + _xml_map = { + 'name': 'Tag' + } + + def __init__( + self, + **kwargs + ): + super(BlobTag, self).__init__(**kwargs) + self.key = kwargs['key'] + self.value = kwargs['value'] + + +class BlobTags(msrest.serialization.Model): + """Blob tags. + + All required parameters must be populated in order to send to Azure. + + :param blob_tag_set: Required. + :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] + """ + + _validation = { + 'blob_tag_set': {'required': True}, + } + + _attribute_map = { + 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, + } + _xml_map = { + 'name': 'Tags' + } + + def __init__( + self, + **kwargs + ): + super(BlobTags, self).__init__(**kwargs) + self.blob_tag_set = kwargs['blob_tag_set'] + + +class Block(msrest.serialization.Model): + """Represents a single block in a block blob. It describes the block's ID and size. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The base64 encoded block ID. + :type name: str + :param size: Required. The block size in bytes. 
+ :type size: long + """ + + _validation = { + 'name': {'required': True}, + 'size': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'size': {'key': 'Size', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(Block, self).__init__(**kwargs) + self.name = kwargs['name'] + self.size = kwargs['size'] + + +class BlockList(msrest.serialization.Model): + """BlockList. + + :param committed_blocks: + :type committed_blocks: list[~azure.storage.blob.models.Block] + :param uncommitted_blocks: + :type uncommitted_blocks: list[~azure.storage.blob.models.Block] + """ + + _attribute_map = { + 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, + 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, + } + + def __init__( + self, + **kwargs + ): + super(BlockList, self).__init__(**kwargs) + self.committed_blocks = kwargs.get('committed_blocks', None) + self.uncommitted_blocks = kwargs.get('uncommitted_blocks', None) + + +class BlockLookupList(msrest.serialization.Model): + """BlockLookupList. + + :param committed: + :type committed: list[str] + :param uncommitted: + :type uncommitted: list[str] + :param latest: + :type latest: list[str] + """ + + _attribute_map = { + 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, + 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, + 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, + } + _xml_map = { + 'name': 'BlockList' + } + + def __init__( + self, + **kwargs + ): + super(BlockLookupList, self).__init__(**kwargs) + self.committed = kwargs.get('committed', None) + self.uncommitted = kwargs.get('uncommitted', None) + self.latest = kwargs.get('latest', None) + + +class ClearRange(msrest.serialization.Model): + """ClearRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'ClearRange' + } + + def __init__( + self, + **kwargs + ): + super(ClearRange, self).__init__(**kwargs) + self.start = kwargs['start'] + self.end = kwargs['end'] + + +class ContainerCpkScopeInfo(msrest.serialization.Model): + """Parameter group. + + :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the + default encryption scope to set on the container and use for all future writes. + :type default_encryption_scope: str + :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true, + prevents any request from specifying a different encryption scope than the scope set on the + container. 
:type prevent_encryption_scope_override: bool
+ """
+
+ _attribute_map = {
+ 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'},
+ 'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(ContainerCpkScopeInfo, self).__init__(**kwargs)
+ self.default_encryption_scope = kwargs.get('default_encryption_scope', None)
+ self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None)
+
+
+class ContainerItem(msrest.serialization.Model):
+ """An Azure Storage container.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param name: Required.
+ :type name: str
+ :param deleted:
+ :type deleted: bool
+ :param version:
+ :type version: str
+ :param properties: Required. Properties of a container.
+ :type properties: ~azure.storage.blob.models.ContainerProperties
+ :param metadata: Dictionary of :code:`<string>`.
+ :type metadata: dict[str, str]
+ """
+
+ _validation = {
+ 'name': {'required': True},
+ 'properties': {'required': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'Name', 'type': 'str'},
+ 'deleted': {'key': 'Deleted', 'type': 'bool'},
+ 'version': {'key': 'Version', 'type': 'str'},
+ 'properties': {'key': 'Properties', 'type': 'ContainerProperties'},
+ 'metadata': {'key': 'Metadata', 'type': '{str}'},
+ }
+ _xml_map = {
+ 'name': 'Container'
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(ContainerItem, self).__init__(**kwargs)
+ self.name = kwargs['name']
+ self.deleted = kwargs.get('deleted', None)
+ self.version = kwargs.get('version', None)
+ self.properties = kwargs['properties']
+ self.metadata = kwargs.get('metadata', None)
+
+
+class ContainerProperties(msrest.serialization.Model):
+ """Properties of a container.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param last_modified: Required.
+ :type last_modified: ~datetime.datetime
+ :param etag: Required.
+ :type etag: str
+ :param lease_status: Possible values include: "locked", "unlocked".
+ :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+ :param lease_state: Possible values include: "available", "leased", "expired", "breaking",
+ "broken".
+ :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
+ :param lease_duration: Possible values include: "infinite", "fixed".
+ :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+ :param public_access: Possible values include: "container", "blob".
+ :type public_access: str or ~azure.storage.blob.models.PublicAccessType
+ :param has_immutability_policy:
+ :type has_immutability_policy: bool
+ :param has_legal_hold:
+ :type has_legal_hold: bool
+ :param default_encryption_scope:
+ :type default_encryption_scope: str
+ :param prevent_encryption_scope_override:
+ :type prevent_encryption_scope_override: bool
+ :param deleted_time:
+ :type deleted_time: ~datetime.datetime
+ :param remaining_retention_days:
+ :type remaining_retention_days: int
+ :param is_immutable_storage_with_versioning_enabled: Indicates whether version-level WORM
+ (immutable storage with versioning) is enabled on this container.
:type is_immutable_storage_with_versioning_enabled: bool
+ """
+
+ _validation = {
+ 'last_modified': {'required': True},
+ 'etag': {'required': True},
+ }
+
+ _attribute_map = {
+ 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'},
+ 'etag': {'key': 'Etag', 'type': 'str'},
+ 'lease_status': {'key': 'LeaseStatus', 'type': 'str'},
+ 'lease_state': {'key': 'LeaseState', 'type': 'str'},
+ 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'},
+ 'public_access': {'key': 'PublicAccess', 'type': 'str'},
+ 'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'},
+ 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'},
+ 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'},
+ 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'},
+ 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'},
+ 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'},
+ 'is_immutable_storage_with_versioning_enabled': {'key': 'ImmutableStorageWithVersioningEnabled', 'type': 'bool'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(ContainerProperties, self).__init__(**kwargs)
+ self.last_modified = kwargs['last_modified']
+ self.etag = kwargs['etag']
+ self.lease_status = kwargs.get('lease_status', None)
+ self.lease_state = kwargs.get('lease_state', None)
+ self.lease_duration = kwargs.get('lease_duration', None)
+ self.public_access = kwargs.get('public_access', None)
+ self.has_immutability_policy = kwargs.get('has_immutability_policy', None)
+ self.has_legal_hold = kwargs.get('has_legal_hold', None)
+ self.default_encryption_scope = kwargs.get('default_encryption_scope', None)
+ self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None)
+ self.deleted_time = kwargs.get('deleted_time', None)
+ self.remaining_retention_days = kwargs.get('remaining_retention_days', None)
+ self.is_immutable_storage_with_versioning_enabled = kwargs.get('is_immutable_storage_with_versioning_enabled', None)
+
+
+class CorsRule(msrest.serialization.Model):
+ """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param allowed_origins: Required. The origin domains that are permitted to make a request
+ against the storage service via CORS. The origin domain is the domain from which the request
+ originates. Note that the origin must be an exact case-sensitive match with the origin that the
+ user agent sends to the service. You can also use the wildcard character '*' to allow all origin
+ domains to make requests via CORS.
+ :type allowed_origins: str
+ :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may
+ use for a CORS request (comma separated).
+ :type allowed_methods: str
+ :param allowed_headers: Required. The request headers that the origin domain may specify on the
+ CORS request.
+ :type allowed_headers: str
+ :param exposed_headers: Required. The response headers that may be sent in the response to the
+ CORS request and exposed by the browser to the request issuer.
+ :type exposed_headers: str
+ :param max_age_in_seconds: Required.
The maximum amount of time that a browser should cache the
+ preflight OPTIONS request.
+ :type max_age_in_seconds: int
+ """
+
+ _validation = {
+ 'allowed_origins': {'required': True},
+ 'allowed_methods': {'required': True},
+ 'allowed_headers': {'required': True},
+ 'exposed_headers': {'required': True},
+ 'max_age_in_seconds': {'required': True, 'minimum': 0},
+ }
+
+ _attribute_map = {
+ 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'},
+ 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'},
+ 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'},
+ 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'},
+ 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(CorsRule, self).__init__(**kwargs)
+ self.allowed_origins = kwargs['allowed_origins']
+ self.allowed_methods = kwargs['allowed_methods']
+ self.allowed_headers = kwargs['allowed_headers']
+ self.exposed_headers = kwargs['exposed_headers']
+ self.max_age_in_seconds = kwargs['max_age_in_seconds']
+
+
+class CpkInfo(msrest.serialization.Model):
+ """Parameter group.
+
+ :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data
+ provided in the request. If not specified, encryption is performed with the root account
+ encryption key. For more information, see Encryption at Rest for Azure Storage Services.
+ :type encryption_key: str
+ :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided
+ if the x-ms-encryption-key header is provided.
+ :type encryption_key_sha256: str
+ :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently,
+ the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
+ provided. Possible values include: "None", "AES256".
+ :type encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType
+ """
+
+ _attribute_map = {
+ 'encryption_key': {'key': 'encryptionKey', 'type': 'str'},
+ 'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'},
+ 'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(CpkInfo, self).__init__(**kwargs)
+ self.encryption_key = kwargs.get('encryption_key', None)
+ self.encryption_key_sha256 = kwargs.get('encryption_key_sha256', None)
+ self.encryption_algorithm = kwargs.get('encryption_algorithm', None)
+
+
+class CpkScopeInfo(msrest.serialization.Model):
+ """Parameter group.
+
+ :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the
+ encryption scope to use to encrypt the data provided in the request. If not specified,
+ encryption is performed with the default account encryption scope. For more information, see
+ Encryption at Rest for Azure Storage Services.
+ :type encryption_scope: str
+ """
+
+ _attribute_map = {
+ 'encryption_scope': {'key': 'encryptionScope', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(CpkScopeInfo, self).__init__(**kwargs)
+ self.encryption_scope = kwargs.get('encryption_scope', None)
+
+
+class DataLakeStorageError(msrest.serialization.Model):
+ """DataLakeStorageError.
+
+ :param data_lake_storage_error_details: The service error response object.
+ :type data_lake_storage_error_details: ~azure.storage.blob.models.DataLakeStorageErrorError + """ + + _attribute_map = { + 'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError'}, + } + + def __init__( + self, + **kwargs + ): + super(DataLakeStorageError, self).__init__(**kwargs) + self.data_lake_storage_error_details = kwargs.get('data_lake_storage_error_details', None) + + +class DataLakeStorageErrorError(msrest.serialization.Model): + """The service error response object. + + :param code: The service error code. + :type code: str + :param message: The service error message. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'Code', 'type': 'str'}, + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(DataLakeStorageErrorError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + + +class DelimitedTextConfiguration(msrest.serialization.Model): + """delimited text configuration. + + All required parameters must be populated in order to send to Azure. + + :param column_separator: Required. column separator. + :type column_separator: str + :param field_quote: Required. field quote. + :type field_quote: str + :param record_separator: Required. record separator. + :type record_separator: str + :param escape_char: Required. escape char. + :type escape_char: str + :param headers_present: Required. has headers. + :type headers_present: bool + """ + + _validation = { + 'column_separator': {'required': True}, + 'field_quote': {'required': True}, + 'record_separator': {'required': True}, + 'escape_char': {'required': True}, + 'headers_present': {'required': True}, + } + + _attribute_map = { + 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, + 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, + 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, + } + _xml_map = { + 'name': 'DelimitedTextConfiguration' + } + + def __init__( + self, + **kwargs + ): + super(DelimitedTextConfiguration, self).__init__(**kwargs) + self.column_separator = kwargs['column_separator'] + self.field_quote = kwargs['field_quote'] + self.record_separator = kwargs['record_separator'] + self.escape_char = kwargs['escape_char'] + self.headers_present = kwargs['headers_present'] + + +class DirectoryHttpHeaders(msrest.serialization.Model): + """Parameter group. + + :param cache_control: Cache control for given resource. + :type cache_control: str + :param content_type: Content type for given resource. + :type content_type: str + :param content_encoding: Content encoding for given resource. + :type content_encoding: str + :param content_language: Content language for given resource. + :type content_language: str + :param content_disposition: Content disposition for given resource. 
+ :type content_disposition: str + """ + + _attribute_map = { + 'cache_control': {'key': 'cacheControl', 'type': 'str'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, + 'content_language': {'key': 'contentLanguage', 'type': 'str'}, + 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(DirectoryHttpHeaders, self).__init__(**kwargs) + self.cache_control = kwargs.get('cache_control', None) + self.content_type = kwargs.get('content_type', None) + self.content_encoding = kwargs.get('content_encoding', None) + self.content_language = kwargs.get('content_language', None) + self.content_disposition = kwargs.get('content_disposition', None) + + +class FilterBlobItem(msrest.serialization.Model): + """Blob info from a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param container_name: Required. + :type container_name: str + :param tags: A set of tags. Blob tags. + :type tags: ~azure.storage.blob.models.BlobTags + """ + + _validation = { + 'name': {'required': True}, + 'container_name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'container_name': {'key': 'ContainerName', 'type': 'str'}, + 'tags': {'key': 'Tags', 'type': 'BlobTags'}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__( + self, + **kwargs + ): + super(FilterBlobItem, self).__init__(**kwargs) + self.name = kwargs['name'] + self.container_name = kwargs['container_name'] + self.tags = kwargs.get('tags', None) + + +class FilterBlobSegment(msrest.serialization.Model): + """The result of a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param where: Required. + :type where: str + :param blobs: Required. + :type blobs: list[~azure.storage.blob.models.FilterBlobItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'where': {'required': True}, + 'blobs': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'where': {'key': 'Where', 'type': 'str'}, + 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(FilterBlobSegment, self).__init__(**kwargs) + self.service_endpoint = kwargs['service_endpoint'] + self.where = kwargs['where'] + self.blobs = kwargs['blobs'] + self.next_marker = kwargs.get('next_marker', None) + + +class GeoReplication(msrest.serialization.Model): + """Geo-Replication information for the Secondary Storage Service. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the secondary location. Possible values include: "live", + "bootstrap", "unavailable". + :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType + :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes + preceding this value are guaranteed to be available for read operations at the secondary. + Primary writes after this point in time may or may not be available for reads. 
+ :type last_sync_time: ~datetime.datetime + """ + + _validation = { + 'status': {'required': True}, + 'last_sync_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'Status', 'type': 'str'}, + 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, + } + + def __init__( + self, + **kwargs + ): + super(GeoReplication, self).__init__(**kwargs) + self.status = kwargs['status'] + self.last_sync_time = kwargs['last_sync_time'] + + +class JsonTextConfiguration(msrest.serialization.Model): + """json text configuration. + + All required parameters must be populated in order to send to Azure. + + :param record_separator: Required. record separator. + :type record_separator: str + """ + + _validation = { + 'record_separator': {'required': True}, + } + + _attribute_map = { + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + } + _xml_map = { + 'name': 'JsonTextConfiguration' + } + + def __init__( + self, + **kwargs + ): + super(JsonTextConfiguration, self).__init__(**kwargs) + self.record_separator = kwargs['record_separator'] + + +class KeyInfo(msrest.serialization.Model): + """Key information. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The date-time the key is active in ISO 8601 UTC time. + :type start: str + :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. + :type expiry: str + """ + + _validation = { + 'start': {'required': True}, + 'expiry': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str'}, + 'expiry': {'key': 'Expiry', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(KeyInfo, self).__init__(**kwargs) + self.start = kwargs['start'] + self.expiry = kwargs['expiry'] + + +class LeaseAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param lease_id: If specified, the operation only succeeds if the resource's lease is active + and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': 'leaseId', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = kwargs.get('lease_id', None) + + +class ListBlobsFlatSegmentResponse(msrest.serialization.Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param segment: Required. 
+ :type segment: ~azure.storage.blob.models.BlobFlatListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs['service_endpoint'] + self.container_name = kwargs['container_name'] + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.segment = kwargs['segment'] + self.next_marker = kwargs.get('next_marker', None) + + +class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param delimiter: + :type delimiter: str + :param segment: Required. + :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'delimiter': {'key': 'Delimiter', 'type': 'str'}, + 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs['service_endpoint'] + self.container_name = kwargs['container_name'] + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.delimiter = kwargs.get('delimiter', None) + self.segment = kwargs['segment'] + self.next_marker = kwargs.get('next_marker', None) + + +class ListContainersSegmentResponse(msrest.serialization.Model): + """An enumeration of containers. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param container_items: Required. 
+ :type container_items: list[~azure.storage.blob.models.ContainerItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_items': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(ListContainersSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs['service_endpoint'] + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.container_items = kwargs['container_items'] + self.next_marker = kwargs.get('next_marker', None) + + +class Logging(msrest.serialization.Model): + """Azure Analytics Logging settings. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param delete: Required. Indicates whether all delete requests should be logged. + :type delete: bool + :param read: Required. Indicates whether all read requests should be logged. + :type read: bool + :param write: Required. Indicates whether all write requests should be logged. + :type write: bool + :param retention_policy: Required. the retention policy which determines how long the + associated data should persist. + :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'delete': {'required': True}, + 'read': {'required': True}, + 'write': {'required': True}, + 'retention_policy': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str'}, + 'delete': {'key': 'Delete', 'type': 'bool'}, + 'read': {'key': 'Read', 'type': 'bool'}, + 'write': {'key': 'Write', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + **kwargs + ): + super(Logging, self).__init__(**kwargs) + self.version = kwargs['version'] + self.delete = kwargs['delete'] + self.read = kwargs['read'] + self.write = kwargs['write'] + self.retention_policy = kwargs['retention_policy'] + + +class Metrics(msrest.serialization.Model): + """a summary of request statistics grouped by API in hour or minute aggregates for blobs. + + All required parameters must be populated in order to send to Azure. + + :param version: The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the Blob service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary statistics for called + API operations. + :type include_apis: bool + :param retention_policy: the retention policy which determines how long the associated data + should persist. 
+ :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str'}, + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + **kwargs + ): + super(Metrics, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.enabled = kwargs['enabled'] + self.include_apis = kwargs.get('include_apis', None) + self.retention_policy = kwargs.get('retention_policy', None) + + +class ModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param if_modified_since: Specify this header value to operate only on a blob if it has been + modified since the specified date/time. + :type if_modified_since: ~datetime.datetime + :param if_unmodified_since: Specify this header value to operate only on a blob if it has not + been modified since the specified date/time. + :type if_unmodified_since: ~datetime.datetime + :param if_match: Specify an ETag value to operate only on blobs with a matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. + :type if_none_match: str + :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a + matching value. + :type if_tags: str + """ + + _attribute_map = { + 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, + 'if_match': {'key': 'ifMatch', 'type': 'str'}, + 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, + 'if_tags': {'key': 'ifTags', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + self.if_tags = kwargs.get('if_tags', None) + + +class PageList(msrest.serialization.Model): + """the list of pages. + + :param page_range: + :type page_range: list[~azure.storage.blob.models.PageRange] + :param clear_range: + :type clear_range: list[~azure.storage.blob.models.ClearRange] + """ + + _attribute_map = { + 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, + 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, + } + + def __init__( + self, + **kwargs + ): + super(PageList, self).__init__(**kwargs) + self.page_range = kwargs.get('page_range', None) + self.clear_range = kwargs.get('clear_range', None) + + +class PageRange(msrest.serialization.Model): + """PageRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'PageRange' + } + + def __init__( + self, + **kwargs + ): + super(PageRange, self).__init__(**kwargs) + self.start = kwargs['start'] + self.end = kwargs['end'] + + +class QueryFormat(msrest.serialization.Model): + """QueryFormat. 
+ + :param type: The quick query format type. Possible values include: "delimited", "json", + "arrow", "parquet". + :type type: str or ~azure.storage.blob.models.QueryFormatType + :param delimited_text_configuration: delimited text configuration. + :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration + :param json_text_configuration: json text configuration. + :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration + :param arrow_configuration: arrow configuration. + :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration + :param parquet_text_configuration: Any object. + :type parquet_text_configuration: any + """ + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, + 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, + 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, + 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, + 'parquet_text_configuration': {'key': 'ParquetTextConfiguration', 'type': 'object'}, + } + + def __init__( + self, + **kwargs + ): + super(QueryFormat, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None) + self.json_text_configuration = kwargs.get('json_text_configuration', None) + self.arrow_configuration = kwargs.get('arrow_configuration', None) + self.parquet_text_configuration = kwargs.get('parquet_text_configuration', None) + + +class QueryRequest(msrest.serialization.Model): + """the quick query body. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar query_type: Required. the query type. Default value: "SQL". + :vartype query_type: str + :param expression: Required. a query statement. + :type expression: str + :param input_serialization: + :type input_serialization: ~azure.storage.blob.models.QuerySerialization + :param output_serialization: + :type output_serialization: ~azure.storage.blob.models.QuerySerialization + """ + + _validation = { + 'query_type': {'required': True, 'constant': True}, + 'expression': {'required': True}, + } + + _attribute_map = { + 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, + 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, + 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'}, + 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'}, + } + _xml_map = { + 'name': 'QueryRequest' + } + + query_type = "SQL" + + def __init__( + self, + **kwargs + ): + super(QueryRequest, self).__init__(**kwargs) + self.expression = kwargs['expression'] + self.input_serialization = kwargs.get('input_serialization', None) + self.output_serialization = kwargs.get('output_serialization', None) + + +class QuerySerialization(msrest.serialization.Model): + """QuerySerialization. + + All required parameters must be populated in order to send to Azure. + + :param format: Required. 
+    :type format: ~azure.storage.blob.models.QueryFormat
+    """
+
+    _validation = {
+        'format': {'required': True},
+    }
+
+    _attribute_map = {
+        'format': {'key': 'Format', 'type': 'QueryFormat'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(QuerySerialization, self).__init__(**kwargs)
+        self.format = kwargs['format']
+
+
+class RetentionPolicy(msrest.serialization.Model):
+    """the retention policy which determines how long the associated data should persist.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param enabled: Required. Indicates whether a retention policy is enabled for the storage
+     service.
+    :type enabled: bool
+    :param days: Indicates the number of days that metrics or logging or soft-deleted data should
+     be retained. All data older than this value will be deleted.
+    :type days: int
+    :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage
+     account.
+    :type allow_permanent_delete: bool
+    """
+
+    _validation = {
+        'enabled': {'required': True},
+        'days': {'minimum': 1},
+    }
+
+    _attribute_map = {
+        'enabled': {'key': 'Enabled', 'type': 'bool'},
+        'days': {'key': 'Days', 'type': 'int'},
+        'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(RetentionPolicy, self).__init__(**kwargs)
+        self.enabled = kwargs['enabled']
+        self.days = kwargs.get('days', None)
+        self.allow_permanent_delete = kwargs.get('allow_permanent_delete', None)
+
+
+class SequenceNumberAccessConditions(msrest.serialization.Model):
+    """Parameter group.
+
+    :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a
+     blob if it has a sequence number less than or equal to the specified value.
+    :type if_sequence_number_less_than_or_equal_to: long
+    :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it
+     has a sequence number less than the specified value.
+    :type if_sequence_number_less_than: long
+    :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it
+     has the specified sequence number.
+    :type if_sequence_number_equal_to: long
+    """
+
+    _attribute_map = {
+        'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'},
+        'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'},
+        'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(SequenceNumberAccessConditions, self).__init__(**kwargs)
+        self.if_sequence_number_less_than_or_equal_to = kwargs.get('if_sequence_number_less_than_or_equal_to', None)
+        self.if_sequence_number_less_than = kwargs.get('if_sequence_number_less_than', None)
+        self.if_sequence_number_equal_to = kwargs.get('if_sequence_number_equal_to', None)
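+
+# A minimal usage sketch, with placeholder values, for the kwargs-based models
+# above: 'enabled' is the only required RetentionPolicy argument, and 'days',
+# when supplied, must be at least 1.
+#
+#     policy = RetentionPolicy(enabled=True, days=7)
+#     conditions = SequenceNumberAccessConditions(if_sequence_number_less_than=100)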
+
+
+class SignedIdentifier(msrest.serialization.Model):
+    """signed identifier.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param id: Required. a unique id.
+    :type id: str
+    :param access_policy: An Access policy.
+    :type access_policy: ~azure.storage.blob.models.AccessPolicy
+    """
+
+    _validation = {
+        'id': {'required': True},
+    }
+
+    _attribute_map = {
+        'id': {'key': 'Id', 'type': 'str'},
+        'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'},
+    }
+    _xml_map = {
+        'name': 'SignedIdentifier'
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(SignedIdentifier, self).__init__(**kwargs)
+        self.id = kwargs['id']
+        self.access_policy = kwargs.get('access_policy', None)
+
+
+class SourceModifiedAccessConditions(msrest.serialization.Model):
+    """Parameter group.
+
+    :param source_if_modified_since: Specify this header value to operate only on a blob if it has
+     been modified since the specified date/time.
+    :type source_if_modified_since: ~datetime.datetime
+    :param source_if_unmodified_since: Specify this header value to operate only on a blob if it
+     has not been modified since the specified date/time.
+    :type source_if_unmodified_since: ~datetime.datetime
+    :param source_if_match: Specify an ETag value to operate only on blobs with a matching value.
+    :type source_if_match: str
+    :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching
+     value.
+    :type source_if_none_match: str
+    :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a
+     matching value.
+    :type source_if_tags: str
+    """
+
+    _attribute_map = {
+        'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'},
+        'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'},
+        'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'},
+        'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'},
+        'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(SourceModifiedAccessConditions, self).__init__(**kwargs)
+        self.source_if_modified_since = kwargs.get('source_if_modified_since', None)
+        self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None)
+        self.source_if_match = kwargs.get('source_if_match', None)
+        self.source_if_none_match = kwargs.get('source_if_none_match', None)
+        self.source_if_tags = kwargs.get('source_if_tags', None)
+
+
+class StaticWebsite(msrest.serialization.Model):
+    """The properties that enable an account to host a static website.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param enabled: Required. Indicates whether this account is hosting a static website.
+    :type enabled: bool
+    :param index_document: The default name of the index page under each directory.
+    :type index_document: str
+    :param error_document404_path: The absolute path of the custom 404 page.
+    :type error_document404_path: str
+    :param default_index_document_path: Absolute path of the default index page.
+ :type default_index_document_path: str + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'index_document': {'key': 'IndexDocument', 'type': 'str'}, + 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, + 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(StaticWebsite, self).__init__(**kwargs) + self.enabled = kwargs['enabled'] + self.index_document = kwargs.get('index_document', None) + self.error_document404_path = kwargs.get('error_document404_path', None) + self.default_index_document_path = kwargs.get('default_index_document_path', None) + + +class StorageError(msrest.serialization.Model): + """StorageError. + + :param message: + :type message: str + """ + + _attribute_map = { + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageError, self).__init__(**kwargs) + self.message = kwargs.get('message', None) + + +class StorageServiceProperties(msrest.serialization.Model): + """Storage Service Properties. + + :param logging: Azure Analytics Logging settings. + :type logging: ~azure.storage.blob.models.Logging + :param hour_metrics: a summary of request statistics grouped by API in hour or minute + aggregates for blobs. + :type hour_metrics: ~azure.storage.blob.models.Metrics + :param minute_metrics: a summary of request statistics grouped by API in hour or minute + aggregates for blobs. + :type minute_metrics: ~azure.storage.blob.models.Metrics + :param cors: The set of CORS rules. + :type cors: list[~azure.storage.blob.models.CorsRule] + :param default_service_version: The default version to use for requests to the Blob service if + an incoming request's version is not specified. Possible values include version 2008-10-27 and + all more recent versions. + :type default_service_version: str + :param delete_retention_policy: the retention policy which determines how long the associated + data should persist. + :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy + :param static_website: The properties that enable an account to host a static website. + :type static_website: ~azure.storage.blob.models.StaticWebsite + """ + + _attribute_map = { + 'logging': {'key': 'Logging', 'type': 'Logging'}, + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, + 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, + 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, + 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageServiceProperties, self).__init__(**kwargs) + self.logging = kwargs.get('logging', None) + self.hour_metrics = kwargs.get('hour_metrics', None) + self.minute_metrics = kwargs.get('minute_metrics', None) + self.cors = kwargs.get('cors', None) + self.default_service_version = kwargs.get('default_service_version', None) + self.delete_retention_policy = kwargs.get('delete_retention_policy', None) + self.static_website = kwargs.get('static_website', None) + + +class StorageServiceStats(msrest.serialization.Model): + """Stats for the storage service. + + :param geo_replication: Geo-Replication information for the Secondary Storage Service. 
+ :type geo_replication: ~azure.storage.blob.models.GeoReplication + """ + + _attribute_map = { + 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageServiceStats, self).__init__(**kwargs) + self.geo_replication = kwargs.get('geo_replication', None) + + +class UserDelegationKey(msrest.serialization.Model): + """A user delegation key. + + All required parameters must be populated in order to send to Azure. + + :param signed_oid: Required. The Azure Active Directory object ID in GUID format. + :type signed_oid: str + :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. + :type signed_tid: str + :param signed_start: Required. The date-time the key is active. + :type signed_start: ~datetime.datetime + :param signed_expiry: Required. The date-time the key expires. + :type signed_expiry: ~datetime.datetime + :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the + key. + :type signed_service: str + :param signed_version: Required. The service version that created the key. + :type signed_version: str + :param value: Required. The key as a base64 string. + :type value: str + """ + + _validation = { + 'signed_oid': {'required': True}, + 'signed_tid': {'required': True}, + 'signed_start': {'required': True}, + 'signed_expiry': {'required': True}, + 'signed_service': {'required': True}, + 'signed_version': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, + 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, + 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, + 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, + 'signed_service': {'key': 'SignedService', 'type': 'str'}, + 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(UserDelegationKey, self).__init__(**kwargs) + self.signed_oid = kwargs['signed_oid'] + self.signed_tid = kwargs['signed_tid'] + self.signed_start = kwargs['signed_start'] + self.signed_expiry = kwargs['signed_expiry'] + self.signed_service = kwargs['signed_service'] + self.signed_version = kwargs['signed_version'] + self.value = kwargs['value'] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models_py3.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models_py3.py new file mode 100644 index 0000000..b3a394e --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/models/_models_py3.py @@ -0,0 +1,2333 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import Any, Dict, List, Optional, Union + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + +from ._azure_blob_storage_enums import * + + +class AccessPolicy(msrest.serialization.Model): + """An Access policy. + + :param start: the date-time the policy is active. + :type start: str + :param expiry: the date-time the policy expires. 
+ :type expiry: str + :param permission: the permissions for the acl policy. + :type permission: str + """ + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str'}, + 'expiry': {'key': 'Expiry', 'type': 'str'}, + 'permission': {'key': 'Permission', 'type': 'str'}, + } + + def __init__( + self, + *, + start: Optional[str] = None, + expiry: Optional[str] = None, + permission: Optional[str] = None, + **kwargs + ): + super(AccessPolicy, self).__init__(**kwargs) + self.start = start + self.expiry = expiry + self.permission = permission + + +class AppendPositionAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param max_size: Optional conditional header. The max length in bytes permitted for the append + blob. If the Append Block operation would cause the blob to exceed that limit or if the blob + size is already greater than the value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :type max_size: long + :param append_position: Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will succeed only if the append + position is equal to this number. If it is not, the request will fail with the + AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + :type append_position: long + """ + + _attribute_map = { + 'max_size': {'key': 'maxSize', 'type': 'long'}, + 'append_position': {'key': 'appendPosition', 'type': 'long'}, + } + + def __init__( + self, + *, + max_size: Optional[int] = None, + append_position: Optional[int] = None, + **kwargs + ): + super(AppendPositionAccessConditions, self).__init__(**kwargs) + self.max_size = max_size + self.append_position = append_position + + +class ArrowConfiguration(msrest.serialization.Model): + """arrow configuration. + + All required parameters must be populated in order to send to Azure. + + :param schema: Required. + :type schema: list[~azure.storage.blob.models.ArrowField] + """ + + _validation = { + 'schema': {'required': True}, + } + + _attribute_map = { + 'schema': {'key': 'Schema', 'type': '[ArrowField]', 'xml': {'name': 'Schema', 'wrapped': True, 'itemsName': 'Field'}}, + } + _xml_map = { + 'name': 'ArrowConfiguration' + } + + def __init__( + self, + *, + schema: List["ArrowField"], + **kwargs + ): + super(ArrowConfiguration, self).__init__(**kwargs) + self.schema = schema + + +class ArrowField(msrest.serialization.Model): + """field of an arrow schema. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. + :type type: str + :param name: + :type name: str + :param precision: + :type precision: int + :param scale: + :type scale: int + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'precision': {'key': 'Precision', 'type': 'int'}, + 'scale': {'key': 'Scale', 'type': 'int'}, + } + _xml_map = { + 'name': 'Field' + } + + def __init__( + self, + *, + type: str, + name: Optional[str] = None, + precision: Optional[int] = None, + scale: Optional[int] = None, + **kwargs + ): + super(ArrowField, self).__init__(**kwargs) + self.type = type + self.name = name + self.precision = precision + self.scale = scale + + +class BlobFlatListSegment(msrest.serialization.Model): + """BlobFlatListSegment. + + All required parameters must be populated in order to send to Azure. 
+ + :param blob_items: Required. + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__( + self, + *, + blob_items: List["BlobItemInternal"], + **kwargs + ): + super(BlobFlatListSegment, self).__init__(**kwargs) + self.blob_items = blob_items + + +class BlobHierarchyListSegment(msrest.serialization.Model): + """BlobHierarchyListSegment. + + All required parameters must be populated in order to send to Azure. + + :param blob_prefixes: + :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] + :param blob_items: Required. + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix'}}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__( + self, + *, + blob_items: List["BlobItemInternal"], + blob_prefixes: Optional[List["BlobPrefix"]] = None, + **kwargs + ): + super(BlobHierarchyListSegment, self).__init__(**kwargs) + self.blob_prefixes = blob_prefixes + self.blob_items = blob_items + + +class BlobHTTPHeaders(msrest.serialization.Model): + """Parameter group. + + :param blob_cache_control: Optional. Sets the blob's cache control. If specified, this property + is stored with the blob and returned with a read request. + :type blob_cache_control: str + :param blob_content_type: Optional. Sets the blob's content type. If specified, this property + is stored with the blob and returned with a read request. + :type blob_content_type: str + :param blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not + validated, as the hashes for the individual blocks were validated when each was uploaded. + :type blob_content_md5: bytearray + :param blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this + property is stored with the blob and returned with a read request. + :type blob_content_encoding: str + :param blob_content_language: Optional. Set the blob's content language. If specified, this + property is stored with the blob and returned with a read request. + :type blob_content_language: str + :param blob_content_disposition: Optional. Sets the blob's Content-Disposition header. 
+    :type blob_content_disposition: str
+    """
+
+    _attribute_map = {
+        'blob_cache_control': {'key': 'blobCacheControl', 'type': 'str'},
+        'blob_content_type': {'key': 'blobContentType', 'type': 'str'},
+        'blob_content_md5': {'key': 'blobContentMD5', 'type': 'bytearray'},
+        'blob_content_encoding': {'key': 'blobContentEncoding', 'type': 'str'},
+        'blob_content_language': {'key': 'blobContentLanguage', 'type': 'str'},
+        'blob_content_disposition': {'key': 'blobContentDisposition', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        *,
+        blob_cache_control: Optional[str] = None,
+        blob_content_type: Optional[str] = None,
+        blob_content_md5: Optional[bytearray] = None,
+        blob_content_encoding: Optional[str] = None,
+        blob_content_language: Optional[str] = None,
+        blob_content_disposition: Optional[str] = None,
+        **kwargs
+    ):
+        super(BlobHTTPHeaders, self).__init__(**kwargs)
+        self.blob_cache_control = blob_cache_control
+        self.blob_content_type = blob_content_type
+        self.blob_content_md5 = blob_content_md5
+        self.blob_content_encoding = blob_content_encoding
+        self.blob_content_language = blob_content_language
+        self.blob_content_disposition = blob_content_disposition
+
+
+class BlobItemInternal(msrest.serialization.Model):
+    """An Azure Storage blob.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param name: Required.
+    :type name: str
+    :param deleted: Required.
+    :type deleted: bool
+    :param snapshot: Required.
+    :type snapshot: str
+    :param version_id:
+    :type version_id: str
+    :param is_current_version:
+    :type is_current_version: bool
+    :param properties: Required. Properties of a blob.
+    :type properties: ~azure.storage.blob.models.BlobPropertiesInternal
+    :param metadata:
+    :type metadata: ~azure.storage.blob.models.BlobMetadata
+    :param blob_tags: Blob tags.
+    :type blob_tags: ~azure.storage.blob.models.BlobTags
+    :param object_replication_metadata: Dictionary of :code:`<string>`.
+ :type object_replication_metadata: dict[str, str] + :param has_versions_only: + :type has_versions_only: bool + """ + + _validation = { + 'name': {'required': True}, + 'deleted': {'required': True}, + 'snapshot': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'deleted': {'key': 'Deleted', 'type': 'bool'}, + 'snapshot': {'key': 'Snapshot', 'type': 'str'}, + 'version_id': {'key': 'VersionId', 'type': 'str'}, + 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, + 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, + 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata'}, + 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags'}, + 'object_replication_metadata': {'key': 'OrMetadata', 'type': '{str}'}, + 'has_versions_only': {'key': 'HasVersionsOnly', 'type': 'bool'}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__( + self, + *, + name: str, + deleted: bool, + snapshot: str, + properties: "BlobPropertiesInternal", + version_id: Optional[str] = None, + is_current_version: Optional[bool] = None, + metadata: Optional["BlobMetadata"] = None, + blob_tags: Optional["BlobTags"] = None, + object_replication_metadata: Optional[Dict[str, str]] = None, + has_versions_only: Optional[bool] = None, + **kwargs + ): + super(BlobItemInternal, self).__init__(**kwargs) + self.name = name + self.deleted = deleted + self.snapshot = snapshot + self.version_id = version_id + self.is_current_version = is_current_version + self.properties = properties + self.metadata = metadata + self.blob_tags = blob_tags + self.object_replication_metadata = object_replication_metadata + self.has_versions_only = has_versions_only + + +class BlobMetadata(msrest.serialization.Model): + """BlobMetadata. + + :param additional_properties: Unmatched properties from the message are deserialized to this + collection. + :type additional_properties: dict[str, str] + :param encrypted: + :type encrypted: str + """ + + _attribute_map = { + 'additional_properties': {'key': '', 'type': '{str}'}, + 'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'attr': True}}, + } + _xml_map = { + 'name': 'Metadata' + } + + def __init__( + self, + *, + additional_properties: Optional[Dict[str, str]] = None, + encrypted: Optional[str] = None, + **kwargs + ): + super(BlobMetadata, self).__init__(**kwargs) + self.additional_properties = additional_properties + self.encrypted = encrypted + + +class BlobPrefix(msrest.serialization.Model): + """BlobPrefix. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(BlobPrefix, self).__init__(**kwargs) + self.name = name + + +class BlobPropertiesInternal(msrest.serialization.Model): + """Properties of a blob. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: + :type creation_time: ~datetime.datetime + :param last_modified: Required. + :type last_modified: ~datetime.datetime + :param etag: Required. + :type etag: str + :param content_length: Size in bytes. 
+ :type content_length: long + :param content_type: + :type content_type: str + :param content_encoding: + :type content_encoding: str + :param content_language: + :type content_language: str + :param content_md5: + :type content_md5: bytearray + :param content_disposition: + :type content_disposition: str + :param cache_control: + :type cache_control: str + :param blob_sequence_number: + :type blob_sequence_number: long + :param blob_type: Possible values include: "BlockBlob", "PageBlob", "AppendBlob". + :type blob_type: str or ~azure.storage.blob.models.BlobType + :param lease_status: Possible values include: "locked", "unlocked". + :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :param lease_state: Possible values include: "available", "leased", "expired", "breaking", + "broken". + :type lease_state: str or ~azure.storage.blob.models.LeaseStateType + :param lease_duration: Possible values include: "infinite", "fixed". + :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :param copy_id: + :type copy_id: str + :param copy_status: Possible values include: "pending", "success", "aborted", "failed". + :type copy_status: str or ~azure.storage.blob.models.CopyStatusType + :param copy_source: + :type copy_source: str + :param copy_progress: + :type copy_progress: str + :param copy_completion_time: + :type copy_completion_time: ~datetime.datetime + :param copy_status_description: + :type copy_status_description: str + :param server_encrypted: + :type server_encrypted: bool + :param incremental_copy: + :type incremental_copy: bool + :param destination_snapshot: + :type destination_snapshot: str + :param deleted_time: + :type deleted_time: ~datetime.datetime + :param remaining_retention_days: + :type remaining_retention_days: int + :param access_tier: Possible values include: "P4", "P6", "P10", "P15", "P20", "P30", "P40", + "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive". + :type access_tier: str or ~azure.storage.blob.models.AccessTier + :param access_tier_inferred: + :type access_tier_inferred: bool + :param archive_status: Possible values include: "rehydrate-pending-to-hot", + "rehydrate-pending-to-cool". + :type archive_status: str or ~azure.storage.blob.models.ArchiveStatus + :param customer_provided_key_sha256: + :type customer_provided_key_sha256: str + :param encryption_scope: The name of the encryption scope under which the blob is encrypted. + :type encryption_scope: str + :param access_tier_change_time: + :type access_tier_change_time: ~datetime.datetime + :param tag_count: + :type tag_count: int + :param expires_on: + :type expires_on: ~datetime.datetime + :param is_sealed: + :type is_sealed: bool + :param rehydrate_priority: If an object is in rehydrate pending state then this header is + returned with priority of rehydrate. Valid values are High and Standard. Possible values + include: "High", "Standard". + :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param last_accessed_on: + :type last_accessed_on: ~datetime.datetime + :param immutability_policy_expires_on: + :type immutability_policy_expires_on: ~datetime.datetime + :param immutability_policy_mode: Possible values include: "Unlocked", "Locked", "Mutable". 
+ :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: + :type legal_hold: bool + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + 'content_length': {'key': 'Content-Length', 'type': 'long'}, + 'content_type': {'key': 'Content-Type', 'type': 'str'}, + 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, + 'content_language': {'key': 'Content-Language', 'type': 'str'}, + 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, + 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, + 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, + 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, + 'blob_type': {'key': 'BlobType', 'type': 'str'}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, + 'lease_state': {'key': 'LeaseState', 'type': 'str'}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, + 'copy_id': {'key': 'CopyId', 'type': 'str'}, + 'copy_status': {'key': 'CopyStatus', 'type': 'str'}, + 'copy_source': {'key': 'CopySource', 'type': 'str'}, + 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, + 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, + 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, + 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, + 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, + 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, + 'access_tier': {'key': 'AccessTier', 'type': 'str'}, + 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, + 'archive_status': {'key': 'ArchiveStatus', 'type': 'str'}, + 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, + 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, + 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, + 'tag_count': {'key': 'TagCount', 'type': 'int'}, + 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, + 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, + 'rehydrate_priority': {'key': 'RehydratePriority', 'type': 'str'}, + 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, + 'immutability_policy_expires_on': {'key': 'ImmutabilityPolicyUntilDate', 'type': 'rfc-1123'}, + 'immutability_policy_mode': {'key': 'ImmutabilityPolicyMode', 'type': 'str'}, + 'legal_hold': {'key': 'LegalHold', 'type': 'bool'}, + } + _xml_map = { + 'name': 'Properties' + } + + def __init__( + self, + *, + last_modified: datetime.datetime, + etag: str, + creation_time: Optional[datetime.datetime] = None, + content_length: Optional[int] = None, + content_type: Optional[str] = None, + content_encoding: Optional[str] = None, + content_language: Optional[str] = None, + content_md5: Optional[bytearray] = None, + content_disposition: Optional[str] = None, + cache_control: Optional[str] = None, + blob_sequence_number: Optional[int] = None, + blob_type: Optional[Union[str, "BlobType"]] = None, + lease_status: Optional[Union[str, "LeaseStatusType"]] = None, + lease_state: Optional[Union[str, "LeaseStateType"]] = None, + 
lease_duration: Optional[Union[str, "LeaseDurationType"]] = None, + copy_id: Optional[str] = None, + copy_status: Optional[Union[str, "CopyStatusType"]] = None, + copy_source: Optional[str] = None, + copy_progress: Optional[str] = None, + copy_completion_time: Optional[datetime.datetime] = None, + copy_status_description: Optional[str] = None, + server_encrypted: Optional[bool] = None, + incremental_copy: Optional[bool] = None, + destination_snapshot: Optional[str] = None, + deleted_time: Optional[datetime.datetime] = None, + remaining_retention_days: Optional[int] = None, + access_tier: Optional[Union[str, "AccessTier"]] = None, + access_tier_inferred: Optional[bool] = None, + archive_status: Optional[Union[str, "ArchiveStatus"]] = None, + customer_provided_key_sha256: Optional[str] = None, + encryption_scope: Optional[str] = None, + access_tier_change_time: Optional[datetime.datetime] = None, + tag_count: Optional[int] = None, + expires_on: Optional[datetime.datetime] = None, + is_sealed: Optional[bool] = None, + rehydrate_priority: Optional[Union[str, "RehydratePriority"]] = None, + last_accessed_on: Optional[datetime.datetime] = None, + immutability_policy_expires_on: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + **kwargs + ): + super(BlobPropertiesInternal, self).__init__(**kwargs) + self.creation_time = creation_time + self.last_modified = last_modified + self.etag = etag + self.content_length = content_length + self.content_type = content_type + self.content_encoding = content_encoding + self.content_language = content_language + self.content_md5 = content_md5 + self.content_disposition = content_disposition + self.cache_control = cache_control + self.blob_sequence_number = blob_sequence_number + self.blob_type = blob_type + self.lease_status = lease_status + self.lease_state = lease_state + self.lease_duration = lease_duration + self.copy_id = copy_id + self.copy_status = copy_status + self.copy_source = copy_source + self.copy_progress = copy_progress + self.copy_completion_time = copy_completion_time + self.copy_status_description = copy_status_description + self.server_encrypted = server_encrypted + self.incremental_copy = incremental_copy + self.destination_snapshot = destination_snapshot + self.deleted_time = deleted_time + self.remaining_retention_days = remaining_retention_days + self.access_tier = access_tier + self.access_tier_inferred = access_tier_inferred + self.archive_status = archive_status + self.customer_provided_key_sha256 = customer_provided_key_sha256 + self.encryption_scope = encryption_scope + self.access_tier_change_time = access_tier_change_time + self.tag_count = tag_count + self.expires_on = expires_on + self.is_sealed = is_sealed + self.rehydrate_priority = rehydrate_priority + self.last_accessed_on = last_accessed_on + self.immutability_policy_expires_on = immutability_policy_expires_on + self.immutability_policy_mode = immutability_policy_mode + self.legal_hold = legal_hold + + +class BlobTag(msrest.serialization.Model): + """BlobTag. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. + :type key: str + :param value: Required. 
+ :type value: str + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'str'}, + } + _xml_map = { + 'name': 'Tag' + } + + def __init__( + self, + *, + key: str, + value: str, + **kwargs + ): + super(BlobTag, self).__init__(**kwargs) + self.key = key + self.value = value + + +class BlobTags(msrest.serialization.Model): + """Blob tags. + + All required parameters must be populated in order to send to Azure. + + :param blob_tag_set: Required. + :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] + """ + + _validation = { + 'blob_tag_set': {'required': True}, + } + + _attribute_map = { + 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'wrapped': True, 'itemsName': 'Tag'}}, + } + _xml_map = { + 'name': 'Tags' + } + + def __init__( + self, + *, + blob_tag_set: List["BlobTag"], + **kwargs + ): + super(BlobTags, self).__init__(**kwargs) + self.blob_tag_set = blob_tag_set + + +class Block(msrest.serialization.Model): + """Represents a single block in a block blob. It describes the block's ID and size. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The base64 encoded block ID. + :type name: str + :param size: Required. The block size in bytes. + :type size: long + """ + + _validation = { + 'name': {'required': True}, + 'size': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'size': {'key': 'Size', 'type': 'long'}, + } + + def __init__( + self, + *, + name: str, + size: int, + **kwargs + ): + super(Block, self).__init__(**kwargs) + self.name = name + self.size = size + + +class BlockList(msrest.serialization.Model): + """BlockList. + + :param committed_blocks: + :type committed_blocks: list[~azure.storage.blob.models.Block] + :param uncommitted_blocks: + :type uncommitted_blocks: list[~azure.storage.blob.models.Block] + """ + + _attribute_map = { + 'committed_blocks': {'key': 'CommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, + 'uncommitted_blocks': {'key': 'UncommittedBlocks', 'type': '[Block]', 'xml': {'wrapped': True}}, + } + + def __init__( + self, + *, + committed_blocks: Optional[List["Block"]] = None, + uncommitted_blocks: Optional[List["Block"]] = None, + **kwargs + ): + super(BlockList, self).__init__(**kwargs) + self.committed_blocks = committed_blocks + self.uncommitted_blocks = uncommitted_blocks + + +class BlockLookupList(msrest.serialization.Model): + """BlockLookupList. + + :param committed: + :type committed: list[str] + :param uncommitted: + :type uncommitted: list[str] + :param latest: + :type latest: list[str] + """ + + _attribute_map = { + 'committed': {'key': 'Committed', 'type': '[str]', 'xml': {'itemsName': 'Committed'}}, + 'uncommitted': {'key': 'Uncommitted', 'type': '[str]', 'xml': {'itemsName': 'Uncommitted'}}, + 'latest': {'key': 'Latest', 'type': '[str]', 'xml': {'itemsName': 'Latest'}}, + } + _xml_map = { + 'name': 'BlockList' + } + + def __init__( + self, + *, + committed: Optional[List[str]] = None, + uncommitted: Optional[List[str]] = None, + latest: Optional[List[str]] = None, + **kwargs + ): + super(BlockLookupList, self).__init__(**kwargs) + self.committed = committed + self.uncommitted = uncommitted + self.latest = latest + + +class ClearRange(msrest.serialization.Model): + """ClearRange. + + All required parameters must be populated in order to send to Azure. 
+
+    :param start: Required.
+    :type start: long
+    :param end: Required.
+    :type end: long
+    """
+
+    _validation = {
+        'start': {'required': True},
+        'end': {'required': True},
+    }
+
+    _attribute_map = {
+        'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}},
+        'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}},
+    }
+    _xml_map = {
+        'name': 'ClearRange'
+    }
+
+    def __init__(
+        self,
+        *,
+        start: int,
+        end: int,
+        **kwargs
+    ):
+        super(ClearRange, self).__init__(**kwargs)
+        self.start = start
+        self.end = end
+
+
+class ContainerCpkScopeInfo(msrest.serialization.Model):
+    """Parameter group.
+
+    :param default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the
+     default encryption scope to set on the container and use for all future writes.
+    :type default_encryption_scope: str
+    :param prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true,
+     prevents any request from specifying a different encryption scope than the scope set on the
+     container.
+    :type prevent_encryption_scope_override: bool
+    """
+
+    _attribute_map = {
+        'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'},
+        'prevent_encryption_scope_override': {'key': 'PreventEncryptionScopeOverride', 'type': 'bool'},
+    }
+
+    def __init__(
+        self,
+        *,
+        default_encryption_scope: Optional[str] = None,
+        prevent_encryption_scope_override: Optional[bool] = None,
+        **kwargs
+    ):
+        super(ContainerCpkScopeInfo, self).__init__(**kwargs)
+        self.default_encryption_scope = default_encryption_scope
+        self.prevent_encryption_scope_override = prevent_encryption_scope_override
+
+
+class ContainerItem(msrest.serialization.Model):
+    """An Azure Storage container.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param name: Required.
+    :type name: str
+    :param deleted:
+    :type deleted: bool
+    :param version:
+    :type version: str
+    :param properties: Required. Properties of a container.
+    :type properties: ~azure.storage.blob.models.ContainerProperties
+    :param metadata: Dictionary of :code:`<string>`.
+    :type metadata: dict[str, str]
+    """
+
+    _validation = {
+        'name': {'required': True},
+        'properties': {'required': True},
+    }
+
+    _attribute_map = {
+        'name': {'key': 'Name', 'type': 'str'},
+        'deleted': {'key': 'Deleted', 'type': 'bool'},
+        'version': {'key': 'Version', 'type': 'str'},
+        'properties': {'key': 'Properties', 'type': 'ContainerProperties'},
+        'metadata': {'key': 'Metadata', 'type': '{str}'},
+    }
+    _xml_map = {
+        'name': 'Container'
+    }
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        properties: "ContainerProperties",
+        deleted: Optional[bool] = None,
+        version: Optional[str] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs
+    ):
+        super(ContainerItem, self).__init__(**kwargs)
+        self.name = name
+        self.deleted = deleted
+        self.version = version
+        self.properties = properties
+        self.metadata = metadata
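+
+# A minimal construction sketch, with placeholder values, for the keyword-only
+# py3 models in this module; optional parameters may simply be omitted:
+#
+#     scope = ContainerCpkScopeInfo(
+#         default_encryption_scope="myscope",
+#         prevent_encryption_scope_override=True,
+#     )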
+
+
+class ContainerProperties(msrest.serialization.Model):
+    """Properties of a container.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param last_modified: Required.
+    :type last_modified: ~datetime.datetime
+    :param etag: Required.
+    :type etag: str
+    :param lease_status: Possible values include: "locked", "unlocked".
+    :type lease_status: str or ~azure.storage.blob.models.LeaseStatusType
+    :param lease_state: Possible values include: "available", "leased", "expired", "breaking",
+     "broken".
+    :type lease_state: str or ~azure.storage.blob.models.LeaseStateType
+    :param lease_duration: Possible values include: "infinite", "fixed".
+    :type lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+    :param public_access: Possible values include: "container", "blob".
+    :type public_access: str or ~azure.storage.blob.models.PublicAccessType
+    :param has_immutability_policy:
+    :type has_immutability_policy: bool
+    :param has_legal_hold:
+    :type has_legal_hold: bool
+    :param default_encryption_scope:
+    :type default_encryption_scope: str
+    :param prevent_encryption_scope_override:
+    :type prevent_encryption_scope_override: bool
+    :param deleted_time:
+    :type deleted_time: ~datetime.datetime
+    :param remaining_retention_days:
+    :type remaining_retention_days: int
+    :param is_immutable_storage_with_versioning_enabled: Indicates if version-level WORM is enabled
+     on this container.
+    :type is_immutable_storage_with_versioning_enabled: bool
+    """
+
+    _validation = {
+        'last_modified': {'required': True},
+        'etag': {'required': True},
+    }
+
+    _attribute_map = {
+        'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'},
+        'etag': {'key': 'Etag', 'type': 'str'},
+        'lease_status': {'key': 'LeaseStatus', 'type': 'str'},
+        'lease_state': {'key': 'LeaseState', 'type': 'str'},
+        'lease_duration': {'key': 'LeaseDuration', 'type': 'str'},
+        'public_access': {'key': 'PublicAccess', 'type': 'str'},
+        'has_immutability_policy': {'key': 'HasImmutabilityPolicy', 'type': 'bool'},
+        'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool'},
+        'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str'},
+        'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool'},
+        'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'},
+        'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'},
+        'is_immutable_storage_with_versioning_enabled': {'key': 'ImmutableStorageWithVersioningEnabled', 'type': 'bool'},
+    }
+
+    def __init__(
+        self,
+        *,
+        last_modified: datetime.datetime,
+        etag: str,
+        lease_status: Optional[Union[str, "LeaseStatusType"]] = None,
+        lease_state: Optional[Union[str, "LeaseStateType"]] = None,
+        lease_duration: Optional[Union[str, "LeaseDurationType"]] = None,
+        public_access: Optional[Union[str, "PublicAccessType"]] = None,
+        has_immutability_policy: Optional[bool] = None,
+        has_legal_hold: Optional[bool] = None,
+        default_encryption_scope: Optional[str] = None,
+        prevent_encryption_scope_override: Optional[bool] = None,
+        deleted_time: Optional[datetime.datetime] = None,
+        remaining_retention_days: Optional[int] = None,
+        is_immutable_storage_with_versioning_enabled: Optional[bool] = None,
+        **kwargs
+    ):
+        super(ContainerProperties, self).__init__(**kwargs)
+        self.last_modified = last_modified
+        self.etag = etag
+        self.lease_status = lease_status
+        self.lease_state = lease_state
+        self.lease_duration = lease_duration
+        self.public_access = public_access
+        self.has_immutability_policy = has_immutability_policy
+        self.has_legal_hold = has_legal_hold
+        self.default_encryption_scope = default_encryption_scope
+        self.prevent_encryption_scope_override = prevent_encryption_scope_override
+        self.deleted_time = deleted_time
+        self.remaining_retention_days = remaining_retention_days
+        self.is_immutable_storage_with_versioning_enabled = is_immutable_storage_with_versioning_enabled
+
+
+class CorsRule(msrest.serialization.Model):
+    """CORS is an HTTP feature that enables a web application running under one domain to
+    access resources in another domain. Web browsers implement a security restriction known as
+    same-origin policy that prevents a web page from calling APIs in a different domain; CORS
+    provides a secure way to allow one domain (the origin domain) to call APIs in another domain.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param allowed_origins: Required. The origin domains that are permitted to make a request
+     against the storage service via CORS. The origin domain is the domain from which the request
+     originates. Note that the origin must be an exact case-sensitive match with the origin that
+     the user agent sends to the service. You can also use the wildcard character '*' to allow all
+     origin domains to make requests via CORS.
+    :type allowed_origins: str
+    :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may
+     use for a CORS request. (comma separated).
+    :type allowed_methods: str
+    :param allowed_headers: Required. The request headers that the origin domain may specify on
+     the CORS request.
+    :type allowed_headers: str
+    :param exposed_headers: Required. The response headers that may be sent in the response to the
+     CORS request and exposed by the browser to the request issuer.
+    :type exposed_headers: str
+    :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache
+     the preflight OPTIONS request.
+    :type max_age_in_seconds: int
+    """
+
+    _validation = {
+        'allowed_origins': {'required': True},
+        'allowed_methods': {'required': True},
+        'allowed_headers': {'required': True},
+        'exposed_headers': {'required': True},
+        'max_age_in_seconds': {'required': True, 'minimum': 0},
+    }
+
+    _attribute_map = {
+        'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'},
+        'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'},
+        'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'},
+        'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'},
+        'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'},
+    }
+
+    def __init__(
+        self,
+        *,
+        allowed_origins: str,
+        allowed_methods: str,
+        allowed_headers: str,
+        exposed_headers: str,
+        max_age_in_seconds: int,
+        **kwargs
+    ):
+        super(CorsRule, self).__init__(**kwargs)
+        self.allowed_origins = allowed_origins
+        self.allowed_methods = allowed_methods
+        self.allowed_headers = allowed_headers
+        self.exposed_headers = exposed_headers
+        self.max_age_in_seconds = max_age_in_seconds
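+
+# A minimal construction sketch, with placeholder values: all five CorsRule
+# arguments are required, and multiple values within an argument are passed as
+# a single comma-separated string.
+#
+#     rule = CorsRule(
+#         allowed_origins="https://contoso.com",
+#         allowed_methods="GET,PUT",
+#         allowed_headers="x-ms-meta-*",
+#         exposed_headers="x-ms-meta-*",
+#         max_age_in_seconds=3600,
+#     )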
+
+
+class CpkInfo(msrest.serialization.Model):
+    """Parameter group.
+
+    :param encryption_key: Optional. Specifies the encryption key to use to encrypt the data
+     provided in the request. If not specified, encryption is performed with the root account
+     encryption key. For more information, see Encryption at Rest for Azure Storage Services.
+    :type encryption_key: str
+    :param encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be
+     provided if the x-ms-encryption-key header is provided.
+    :type encryption_key_sha256: str
+    :param encryption_algorithm: The algorithm used to produce the encryption key hash. Currently,
+     the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
+     provided. Possible values include: "None", "AES256".
+    :type encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType
+    """
+
+    _attribute_map = {
+        'encryption_key': {'key': 'encryptionKey', 'type': 'str'},
+        'encryption_key_sha256': {'key': 'encryptionKeySha256', 'type': 'str'},
+        'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        *,
+        encryption_key: Optional[str] = None,
+        encryption_key_sha256: Optional[str] = None,
+        encryption_algorithm: Optional[Union[str, "EncryptionAlgorithmType"]] = None,
+        **kwargs
+    ):
+        super(CpkInfo, self).__init__(**kwargs)
+        self.encryption_key = encryption_key
+        self.encryption_key_sha256 = encryption_key_sha256
+        self.encryption_algorithm = encryption_algorithm
+
+
+class CpkScopeInfo(msrest.serialization.Model):
+    """Parameter group.
+
+    :param encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the
+     encryption scope to use to encrypt the data provided in the request. If not specified,
+     encryption is performed with the default account encryption scope. For more information, see
+     Encryption at Rest for Azure Storage Services.
+    :type encryption_scope: str
+    """
+
+    _attribute_map = {
+        'encryption_scope': {'key': 'encryptionScope', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        *,
+        encryption_scope: Optional[str] = None,
+        **kwargs
+    ):
+        super(CpkScopeInfo, self).__init__(**kwargs)
+        self.encryption_scope = encryption_scope
+
+
+class DataLakeStorageError(msrest.serialization.Model):
+    """DataLakeStorageError.
+
+    :param data_lake_storage_error_details: The service error response object.
+    :type data_lake_storage_error_details: ~azure.storage.blob.models.DataLakeStorageErrorError
+    """
+
+    _attribute_map = {
+        'data_lake_storage_error_details': {'key': 'error', 'type': 'DataLakeStorageErrorError'},
+    }
+
+    def __init__(
+        self,
+        *,
+        data_lake_storage_error_details: Optional["DataLakeStorageErrorError"] = None,
+        **kwargs
+    ):
+        super(DataLakeStorageError, self).__init__(**kwargs)
+        self.data_lake_storage_error_details = data_lake_storage_error_details
+
+
+class DataLakeStorageErrorError(msrest.serialization.Model):
+    """The service error response object.
+
+    :param code: The service error code.
+    :type code: str
+    :param message: The service error message.
+    :type message: str
+    """
+
+    _attribute_map = {
+        'code': {'key': 'Code', 'type': 'str'},
+        'message': {'key': 'Message', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        *,
+        code: Optional[str] = None,
+        message: Optional[str] = None,
+        **kwargs
+    ):
+        super(DataLakeStorageErrorError, self).__init__(**kwargs)
+        self.code = code
+        self.message = message
+
+
+class DelimitedTextConfiguration(msrest.serialization.Model):
+    """delimited text configuration.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param column_separator: Required. column separator.
+    :type column_separator: str
+    :param field_quote: Required. field quote.
+    :type field_quote: str
+    :param record_separator: Required. record separator.
+    :type record_separator: str
+    :param escape_char: Required. escape char.
+    :type escape_char: str
+    :param headers_present: Required. has headers.
+ :type headers_present: bool + """ + + _validation = { + 'column_separator': {'required': True}, + 'field_quote': {'required': True}, + 'record_separator': {'required': True}, + 'escape_char': {'required': True}, + 'headers_present': {'required': True}, + } + + _attribute_map = { + 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, + 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, + 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, + } + _xml_map = { + 'name': 'DelimitedTextConfiguration' + } + + def __init__( + self, + *, + column_separator: str, + field_quote: str, + record_separator: str, + escape_char: str, + headers_present: bool, + **kwargs + ): + super(DelimitedTextConfiguration, self).__init__(**kwargs) + self.column_separator = column_separator + self.field_quote = field_quote + self.record_separator = record_separator + self.escape_char = escape_char + self.headers_present = headers_present + + +class DirectoryHttpHeaders(msrest.serialization.Model): + """Parameter group. + + :param cache_control: Cache control for given resource. + :type cache_control: str + :param content_type: Content type for given resource. + :type content_type: str + :param content_encoding: Content encoding for given resource. + :type content_encoding: str + :param content_language: Content language for given resource. + :type content_language: str + :param content_disposition: Content disposition for given resource. + :type content_disposition: str + """ + + _attribute_map = { + 'cache_control': {'key': 'cacheControl', 'type': 'str'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, + 'content_language': {'key': 'contentLanguage', 'type': 'str'}, + 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, + } + + def __init__( + self, + *, + cache_control: Optional[str] = None, + content_type: Optional[str] = None, + content_encoding: Optional[str] = None, + content_language: Optional[str] = None, + content_disposition: Optional[str] = None, + **kwargs + ): + super(DirectoryHttpHeaders, self).__init__(**kwargs) + self.cache_control = cache_control + self.content_type = content_type + self.content_encoding = content_encoding + self.content_language = content_language + self.content_disposition = content_disposition + + +class FilterBlobItem(msrest.serialization.Model): + """Blob info from a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param container_name: Required. + :type container_name: str + :param tags: A set of tags. Blob tags. 
+ :type tags: ~azure.storage.blob.models.BlobTags + """ + + _validation = { + 'name': {'required': True}, + 'container_name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'container_name': {'key': 'ContainerName', 'type': 'str'}, + 'tags': {'key': 'Tags', 'type': 'BlobTags'}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__( + self, + *, + name: str, + container_name: str, + tags: Optional["BlobTags"] = None, + **kwargs + ): + super(FilterBlobItem, self).__init__(**kwargs) + self.name = name + self.container_name = container_name + self.tags = tags + + +class FilterBlobSegment(msrest.serialization.Model): + """The result of a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param where: Required. + :type where: str + :param blobs: Required. + :type blobs: list[~azure.storage.blob.models.FilterBlobItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'where': {'required': True}, + 'blobs': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'where': {'key': 'Where', 'type': 'str'}, + 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'wrapped': True, 'itemsName': 'Blob'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + service_endpoint: str, + where: str, + blobs: List["FilterBlobItem"], + next_marker: Optional[str] = None, + **kwargs + ): + super(FilterBlobSegment, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.where = where + self.blobs = blobs + self.next_marker = next_marker + + +class GeoReplication(msrest.serialization.Model): + """Geo-Replication information for the Secondary Storage Service. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. The status of the secondary location. Possible values include: "live", + "bootstrap", "unavailable". + :type status: str or ~azure.storage.blob.models.GeoReplicationStatusType + :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes + preceding this value are guaranteed to be available for read operations at the secondary. + Primary writes after this point in time may or may not be available for reads. + :type last_sync_time: ~datetime.datetime + """ + + _validation = { + 'status': {'required': True}, + 'last_sync_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'Status', 'type': 'str'}, + 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, + } + + def __init__( + self, + *, + status: Union[str, "GeoReplicationStatusType"], + last_sync_time: datetime.datetime, + **kwargs + ): + super(GeoReplication, self).__init__(**kwargs) + self.status = status + self.last_sync_time = last_sync_time + + +class JsonTextConfiguration(msrest.serialization.Model): + """json text configuration. + + All required parameters must be populated in order to send to Azure. + + :param record_separator: Required. record separator. 
+ :type record_separator: str + """ + + _validation = { + 'record_separator': {'required': True}, + } + + _attribute_map = { + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + } + _xml_map = { + 'name': 'JsonTextConfiguration' + } + + def __init__( + self, + *, + record_separator: str, + **kwargs + ): + super(JsonTextConfiguration, self).__init__(**kwargs) + self.record_separator = record_separator + + +class KeyInfo(msrest.serialization.Model): + """Key information. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. The date-time the key is active in ISO 8601 UTC time. + :type start: str + :param expiry: Required. The date-time the key expires in ISO 8601 UTC time. + :type expiry: str + """ + + _validation = { + 'start': {'required': True}, + 'expiry': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str'}, + 'expiry': {'key': 'Expiry', 'type': 'str'}, + } + + def __init__( + self, + *, + start: str, + expiry: str, + **kwargs + ): + super(KeyInfo, self).__init__(**kwargs) + self.start = start + self.expiry = expiry + + +class LeaseAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param lease_id: If specified, the operation only succeeds if the resource's lease is active + and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': 'leaseId', 'type': 'str'}, + } + + def __init__( + self, + *, + lease_id: Optional[str] = None, + **kwargs + ): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = lease_id + + +class ListBlobsFlatSegmentResponse(msrest.serialization.Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param segment: Required. + :type segment: ~azure.storage.blob.models.BlobFlatListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'segment': {'key': 'Segment', 'type': 'BlobFlatListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + service_endpoint: str, + container_name: str, + segment: "BlobFlatListSegment", + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + next_marker: Optional[str] = None, + **kwargs + ): + super(ListBlobsFlatSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.container_name = container_name + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.segment = segment + self.next_marker = next_marker + + +class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): + """An enumeration of blobs. 
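ListBlobsFlatSegmentResponse above carries the marker/next_marker pair that drives paging. A sketch of how the public client exposes that as a continuation token, assuming hypothetical account and container names::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")  # hypothetical
    container = service.get_container_client("mycontainer")  # hypothetical name

    # Each page corresponds to one ListBlobsFlatSegmentResponse; its
    # NextMarker becomes the pager's continuation_token.
    pager = container.list_blobs(results_per_page=100).by_page()
    for blob in next(pager):
        print(blob.name)
    print("resume with:", pager.continuation_token)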
+ + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param delimiter: + :type delimiter: str + :param segment: Required. + :type segment: ~azure.storage.blob.models.BlobHierarchyListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'delimiter': {'key': 'Delimiter', 'type': 'str'}, + 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + service_endpoint: str, + container_name: str, + segment: "BlobHierarchyListSegment", + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + delimiter: Optional[str] = None, + next_marker: Optional[str] = None, + **kwargs + ): + super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.container_name = container_name + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.delimiter = delimiter + self.segment = segment + self.next_marker = next_marker + + +class ListContainersSegmentResponse(msrest.serialization.Model): + """An enumeration of containers. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param container_items: Required. 
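ListBlobsHierarchySegmentResponse, completed above, adds the delimiter used for virtual-directory listings. An illustrative sketch (hypothetical names) of the walk_blobs call that populates it::

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")  # hypothetical
    container = service.get_container_client("mycontainer")  # hypothetical name

    # Items are either BlobProperties or BlobPrefix entries, both
    # deserialized from a ListBlobsHierarchySegmentResponse segment.
    for item in container.walk_blobs(name_starts_with="logs/", delimiter="/"):
        print(item.name)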
+ :type container_items: list[~azure.storage.blob.models.ContainerItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_items': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'container_items': {'key': 'ContainerItems', 'type': '[ContainerItem]', 'xml': {'name': 'Containers', 'wrapped': True, 'itemsName': 'Container'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + service_endpoint: str, + container_items: List["ContainerItem"], + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + next_marker: Optional[str] = None, + **kwargs + ): + super(ListContainersSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.container_items = container_items + self.next_marker = next_marker + + +class Logging(msrest.serialization.Model): + """Azure Analytics Logging settings. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param delete: Required. Indicates whether all delete requests should be logged. + :type delete: bool + :param read: Required. Indicates whether all read requests should be logged. + :type read: bool + :param write: Required. Indicates whether all write requests should be logged. + :type write: bool + :param retention_policy: Required. the retention policy which determines how long the + associated data should persist. + :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'delete': {'required': True}, + 'read': {'required': True}, + 'write': {'required': True}, + 'retention_policy': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str'}, + 'delete': {'key': 'Delete', 'type': 'bool'}, + 'read': {'key': 'Read', 'type': 'bool'}, + 'write': {'key': 'Write', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + *, + version: str, + delete: bool, + read: bool, + write: bool, + retention_policy: "RetentionPolicy", + **kwargs + ): + super(Logging, self).__init__(**kwargs) + self.version = version + self.delete = delete + self.read = read + self.write = write + self.retention_policy = retention_policy + + +class Metrics(msrest.serialization.Model): + """a summary of request statistics grouped by API in hour or minute aggregates for blobs. + + All required parameters must be populated in order to send to Azure. + + :param version: The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the Blob service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary statistics for called + API operations. + :type include_apis: bool + :param retention_policy: the retention policy which determines how long the associated data + should persist. 
+ :type retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str'}, + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + *, + enabled: bool, + version: Optional[str] = None, + include_apis: Optional[bool] = None, + retention_policy: Optional["RetentionPolicy"] = None, + **kwargs + ): + super(Metrics, self).__init__(**kwargs) + self.version = version + self.enabled = enabled + self.include_apis = include_apis + self.retention_policy = retention_policy + + +class ModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param if_modified_since: Specify this header value to operate only on a blob if it has been + modified since the specified date/time. + :type if_modified_since: ~datetime.datetime + :param if_unmodified_since: Specify this header value to operate only on a blob if it has not + been modified since the specified date/time. + :type if_unmodified_since: ~datetime.datetime + :param if_match: Specify an ETag value to operate only on blobs with a matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. + :type if_none_match: str + :param if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a + matching value. + :type if_tags: str + """ + + _attribute_map = { + 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, + 'if_match': {'key': 'ifMatch', 'type': 'str'}, + 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, + 'if_tags': {'key': 'ifTags', 'type': 'str'}, + } + + def __init__( + self, + *, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + **kwargs + ): + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since + self.if_match = if_match + self.if_none_match = if_none_match + self.if_tags = if_tags + + +class PageList(msrest.serialization.Model): + """the list of pages. + + :param page_range: + :type page_range: list[~azure.storage.blob.models.PageRange] + :param clear_range: + :type clear_range: list[~azure.storage.blob.models.ClearRange] + """ + + _attribute_map = { + 'page_range': {'key': 'PageRange', 'type': '[PageRange]'}, + 'clear_range': {'key': 'ClearRange', 'type': '[ClearRange]'}, + } + + def __init__( + self, + *, + page_range: Optional[List["PageRange"]] = None, + clear_range: Optional[List["ClearRange"]] = None, + **kwargs + ): + super(PageList, self).__init__(**kwargs) + self.page_range = page_range + self.clear_range = clear_range + + +class PageRange(msrest.serialization.Model): + """PageRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. 
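ModifiedAccessConditions above groups the If-Modified-Since/ETag/tag preconditions into one parameter object. On the public client these arrive as individual keyword arguments; a sketch of an ETag-guarded overwrite, with hypothetical names::

    from azure.core import MatchConditions
    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>",  # hypothetical
        container_name="mycontainer", blob_name="data.bin")

    props = blob.get_blob_properties()
    # Sends If-Match with the captured ETag, so the upload fails if the
    # blob changed since it was read.
    blob.upload_blob(
        b"new content", overwrite=True,
        etag=props.etag, match_condition=MatchConditions.IfNotModified)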
+ :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'PageRange' + } + + def __init__( + self, + *, + start: int, + end: int, + **kwargs + ): + super(PageRange, self).__init__(**kwargs) + self.start = start + self.end = end + + +class QueryFormat(msrest.serialization.Model): + """QueryFormat. + + :param type: The quick query format type. Possible values include: "delimited", "json", + "arrow", "parquet". + :type type: str or ~azure.storage.blob.models.QueryFormatType + :param delimited_text_configuration: delimited text configuration. + :type delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration + :param json_text_configuration: json text configuration. + :type json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration + :param arrow_configuration: arrow configuration. + :type arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration + :param parquet_text_configuration: Any object. + :type parquet_text_configuration: any + """ + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str', 'xml': {'name': 'Type'}}, + 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration'}, + 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration'}, + 'arrow_configuration': {'key': 'ArrowConfiguration', 'type': 'ArrowConfiguration'}, + 'parquet_text_configuration': {'key': 'ParquetTextConfiguration', 'type': 'object'}, + } + + def __init__( + self, + *, + type: Optional[Union[str, "QueryFormatType"]] = None, + delimited_text_configuration: Optional["DelimitedTextConfiguration"] = None, + json_text_configuration: Optional["JsonTextConfiguration"] = None, + arrow_configuration: Optional["ArrowConfiguration"] = None, + parquet_text_configuration: Optional[Any] = None, + **kwargs + ): + super(QueryFormat, self).__init__(**kwargs) + self.type = type + self.delimited_text_configuration = delimited_text_configuration + self.json_text_configuration = json_text_configuration + self.arrow_configuration = arrow_configuration + self.parquet_text_configuration = parquet_text_configuration + + +class QueryRequest(msrest.serialization.Model): + """the quick query body. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar query_type: Required. the query type. Default value: "SQL". + :vartype query_type: str + :param expression: Required. a query statement. 
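PageList and PageRange above describe the Get Page Ranges response. A sketch against the public client, assuming a hypothetical page blob::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>",  # hypothetical
        container_name="mycontainer", blob_name="disk.vhd")

    blob.create_page_blob(size=1024)
    blob.upload_page(b"\x00" * 512, offset=0, length=512)

    # Returns the PageList split into two lists of {'start': n, 'end': n}
    # dicts: written (valid) ranges and cleared ranges.
    valid_ranges, clear_ranges = blob.get_page_ranges()
    print(valid_ranges, clear_ranges)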
+ :type expression: str
+ :param input_serialization:
+ :type input_serialization: ~azure.storage.blob.models.QuerySerialization
+ :param output_serialization:
+ :type output_serialization: ~azure.storage.blob.models.QuerySerialization
+ """
+
+ _validation = {
+ 'query_type': {'required': True, 'constant': True},
+ 'expression': {'required': True},
+ }
+
+ _attribute_map = {
+ 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}},
+ 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}},
+ 'input_serialization': {'key': 'InputSerialization', 'type': 'QuerySerialization'},
+ 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuerySerialization'},
+ }
+ _xml_map = {
+ 'name': 'QueryRequest'
+ }
+
+ query_type = "SQL"
+
+ def __init__(
+ self,
+ *,
+ expression: str,
+ input_serialization: Optional["QuerySerialization"] = None,
+ output_serialization: Optional["QuerySerialization"] = None,
+ **kwargs
+ ):
+ super(QueryRequest, self).__init__(**kwargs)
+ self.expression = expression
+ self.input_serialization = input_serialization
+ self.output_serialization = output_serialization
+
+
+class QuerySerialization(msrest.serialization.Model):
+ """QuerySerialization.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param format: Required.
+ :type format: ~azure.storage.blob.models.QueryFormat
+ """
+
+ _validation = {
+ 'format': {'required': True},
+ }
+
+ _attribute_map = {
+ 'format': {'key': 'Format', 'type': 'QueryFormat'},
+ }
+
+ def __init__(
+ self,
+ *,
+ format: "QueryFormat",
+ **kwargs
+ ):
+ super(QuerySerialization, self).__init__(**kwargs)
+ self.format = format
+
+
+class RetentionPolicy(msrest.serialization.Model):
+ """the retention policy which determines how long the associated data should persist.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param enabled: Required. Indicates whether a retention policy is enabled for the storage
+ service.
+ :type enabled: bool
+ :param days: Indicates the number of days that metrics or logging or soft-deleted data should
+ be retained. All data older than this value will be deleted.
+ :type days: int
+ :param allow_permanent_delete: Indicates whether permanent delete is allowed on this storage
+ account.
+ :type allow_permanent_delete: bool
+ """
+
+ _validation = {
+ 'enabled': {'required': True},
+ 'days': {'minimum': 1},
+ }
+
+ _attribute_map = {
+ 'enabled': {'key': 'Enabled', 'type': 'bool'},
+ 'days': {'key': 'Days', 'type': 'int'},
+ 'allow_permanent_delete': {'key': 'AllowPermanentDelete', 'type': 'bool'},
+ }
+
+ def __init__(
+ self,
+ *,
+ enabled: bool,
+ days: Optional[int] = None,
+ allow_permanent_delete: Optional[bool] = None,
+ **kwargs
+ ):
+ super(RetentionPolicy, self).__init__(**kwargs)
+ self.enabled = enabled
+ self.days = days
+ self.allow_permanent_delete = allow_permanent_delete
+
+
+class SequenceNumberAccessConditions(msrest.serialization.Model):
+ """Parameter group.
+
+ :param if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a
+ blob if it has a sequence number less than or equal to the specified value.
+ :type if_sequence_number_less_than_or_equal_to: long
+ :param if_sequence_number_less_than: Specify this header value to operate only on a blob if it
+ has a sequence number less than the specified value.
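QueryRequest and QuerySerialization above form the quick-query request body (query_type is pinned to "SQL"). A sketch of the public query_blob call that serializes them, assuming a hypothetical CSV blob::

    from azure.storage.blob import BlobClient, DelimitedTextDialect

    blob = BlobClient.from_connection_string(
        "<connection-string>",  # hypothetical
        container_name="mycontainer", blob_name="data.csv")

    # The dialect is serialized into the DelimitedTextConfiguration model
    # defined earlier in this file.
    reader = blob.query_blob(
        "SELECT _2 FROM BlobStorage WHERE _1 > 100",
        blob_format=DelimitedTextDialect(delimiter=",", quotechar='"', has_header=True))
    print(reader.readall())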
+ :type if_sequence_number_less_than: long + :param if_sequence_number_equal_to: Specify this header value to operate only on a blob if it + has the specified sequence number. + :type if_sequence_number_equal_to: long + """ + + _attribute_map = { + 'if_sequence_number_less_than_or_equal_to': {'key': 'ifSequenceNumberLessThanOrEqualTo', 'type': 'long'}, + 'if_sequence_number_less_than': {'key': 'ifSequenceNumberLessThan', 'type': 'long'}, + 'if_sequence_number_equal_to': {'key': 'ifSequenceNumberEqualTo', 'type': 'long'}, + } + + def __init__( + self, + *, + if_sequence_number_less_than_or_equal_to: Optional[int] = None, + if_sequence_number_less_than: Optional[int] = None, + if_sequence_number_equal_to: Optional[int] = None, + **kwargs + ): + super(SequenceNumberAccessConditions, self).__init__(**kwargs) + self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to + self.if_sequence_number_less_than = if_sequence_number_less_than + self.if_sequence_number_equal_to = if_sequence_number_equal_to + + +class SignedIdentifier(msrest.serialization.Model): + """signed identifier. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. a unique id. + :type id: str + :param access_policy: An Access policy. + :type access_policy: ~azure.storage.blob.models.AccessPolicy + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, + } + _xml_map = { + 'name': 'SignedIdentifier' + } + + def __init__( + self, + *, + id: str, + access_policy: Optional["AccessPolicy"] = None, + **kwargs + ): + super(SignedIdentifier, self).__init__(**kwargs) + self.id = id + self.access_policy = access_policy + + +class SourceModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param source_if_modified_since: Specify this header value to operate only on a blob if it has + been modified since the specified date/time. + :type source_if_modified_since: ~datetime.datetime + :param source_if_unmodified_since: Specify this header value to operate only on a blob if it + has not been modified since the specified date/time. + :type source_if_unmodified_since: ~datetime.datetime + :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. + :type source_if_match: str + :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching + value. + :type source_if_none_match: str + :param source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a + matching value. 
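SignedIdentifier above pairs a policy ID with an AccessPolicy. A sketch of setting one stored access policy on a container through the public client (names are hypothetical)::

    from datetime import datetime, timedelta, timezone

    from azure.storage.blob import AccessPolicy, BlobServiceClient, ContainerSasPermissions

    service = BlobServiceClient.from_connection_string("<connection-string>")  # hypothetical
    container = service.get_container_client("mycontainer")  # hypothetical name

    # Each dict entry is serialized as one SignedIdentifier element.
    policy = AccessPolicy(
        permission=ContainerSasPermissions(read=True),
        start=datetime.now(timezone.utc),
        expiry=datetime.now(timezone.utc) + timedelta(hours=1))
    container.set_container_access_policy(signed_identifiers={"read-only": policy})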
+ :type source_if_tags: str + """ + + _attribute_map = { + 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, + 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, + 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, + 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, + 'source_if_tags': {'key': 'sourceIfTags', 'type': 'str'}, + } + + def __init__( + self, + *, + source_if_modified_since: Optional[datetime.datetime] = None, + source_if_unmodified_since: Optional[datetime.datetime] = None, + source_if_match: Optional[str] = None, + source_if_none_match: Optional[str] = None, + source_if_tags: Optional[str] = None, + **kwargs + ): + super(SourceModifiedAccessConditions, self).__init__(**kwargs) + self.source_if_modified_since = source_if_modified_since + self.source_if_unmodified_since = source_if_unmodified_since + self.source_if_match = source_if_match + self.source_if_none_match = source_if_none_match + self.source_if_tags = source_if_tags + + +class StaticWebsite(msrest.serialization.Model): + """The properties that enable an account to host a static website. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Indicates whether this account is hosting a static website. + :type enabled: bool + :param index_document: The default name of the index page under each directory. + :type index_document: str + :param error_document404_path: The absolute path of the custom 404 page. + :type error_document404_path: str + :param default_index_document_path: Absolute path of the default index page. + :type default_index_document_path: str + """ + + _validation = { + 'enabled': {'required': True}, + } + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'index_document': {'key': 'IndexDocument', 'type': 'str'}, + 'error_document404_path': {'key': 'ErrorDocument404Path', 'type': 'str'}, + 'default_index_document_path': {'key': 'DefaultIndexDocumentPath', 'type': 'str'}, + } + + def __init__( + self, + *, + enabled: bool, + index_document: Optional[str] = None, + error_document404_path: Optional[str] = None, + default_index_document_path: Optional[str] = None, + **kwargs + ): + super(StaticWebsite, self).__init__(**kwargs) + self.enabled = enabled + self.index_document = index_document + self.error_document404_path = error_document404_path + self.default_index_document_path = default_index_document_path + + +class StorageError(msrest.serialization.Model): + """StorageError. + + :param message: + :type message: str + """ + + _attribute_map = { + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__( + self, + *, + message: Optional[str] = None, + **kwargs + ): + super(StorageError, self).__init__(**kwargs) + self.message = message + + +class StorageServiceProperties(msrest.serialization.Model): + """Storage Service Properties. + + :param logging: Azure Analytics Logging settings. + :type logging: ~azure.storage.blob.models.Logging + :param hour_metrics: a summary of request statistics grouped by API in hour or minute + aggregates for blobs. + :type hour_metrics: ~azure.storage.blob.models.Metrics + :param minute_metrics: a summary of request statistics grouped by API in hour or minute + aggregates for blobs. + :type minute_metrics: ~azure.storage.blob.models.Metrics + :param cors: The set of CORS rules. 
+ :type cors: list[~azure.storage.blob.models.CorsRule] + :param default_service_version: The default version to use for requests to the Blob service if + an incoming request's version is not specified. Possible values include version 2008-10-27 and + all more recent versions. + :type default_service_version: str + :param delete_retention_policy: the retention policy which determines how long the associated + data should persist. + :type delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy + :param static_website: The properties that enable an account to host a static website. + :type static_website: ~azure.storage.blob.models.StaticWebsite + """ + + _attribute_map = { + 'logging': {'key': 'Logging', 'type': 'Logging'}, + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, + 'default_service_version': {'key': 'DefaultServiceVersion', 'type': 'str'}, + 'delete_retention_policy': {'key': 'DeleteRetentionPolicy', 'type': 'RetentionPolicy'}, + 'static_website': {'key': 'StaticWebsite', 'type': 'StaticWebsite'}, + } + + def __init__( + self, + *, + logging: Optional["Logging"] = None, + hour_metrics: Optional["Metrics"] = None, + minute_metrics: Optional["Metrics"] = None, + cors: Optional[List["CorsRule"]] = None, + default_service_version: Optional[str] = None, + delete_retention_policy: Optional["RetentionPolicy"] = None, + static_website: Optional["StaticWebsite"] = None, + **kwargs + ): + super(StorageServiceProperties, self).__init__(**kwargs) + self.logging = logging + self.hour_metrics = hour_metrics + self.minute_metrics = minute_metrics + self.cors = cors + self.default_service_version = default_service_version + self.delete_retention_policy = delete_retention_policy + self.static_website = static_website + + +class StorageServiceStats(msrest.serialization.Model): + """Stats for the storage service. + + :param geo_replication: Geo-Replication information for the Secondary Storage Service. + :type geo_replication: ~azure.storage.blob.models.GeoReplication + """ + + _attribute_map = { + 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, + } + + def __init__( + self, + *, + geo_replication: Optional["GeoReplication"] = None, + **kwargs + ): + super(StorageServiceStats, self).__init__(**kwargs) + self.geo_replication = geo_replication + + +class UserDelegationKey(msrest.serialization.Model): + """A user delegation key. + + All required parameters must be populated in order to send to Azure. + + :param signed_oid: Required. The Azure Active Directory object ID in GUID format. + :type signed_oid: str + :param signed_tid: Required. The Azure Active Directory tenant ID in GUID format. + :type signed_tid: str + :param signed_start: Required. The date-time the key is active. + :type signed_start: ~datetime.datetime + :param signed_expiry: Required. The date-time the key expires. + :type signed_expiry: ~datetime.datetime + :param signed_service: Required. Abbreviation of the Azure Storage service that accepts the + key. + :type signed_service: str + :param signed_version: Required. The service version that created the key. + :type signed_version: str + :param value: Required. The key as a base64 string. 
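StorageServiceProperties above aggregates Logging, Metrics, RetentionPolicy, and StaticWebsite into the Set Service Properties body. A sketch via the public client; the seven-day retention and the page names are illustrative choices, not defaults::

    from azure.storage.blob import (
        BlobAnalyticsLogging, BlobServiceClient, Metrics, RetentionPolicy, StaticWebsite)

    service = BlobServiceClient.from_connection_string("<connection-string>")  # hypothetical

    retention = RetentionPolicy(enabled=True, days=7)
    service.set_service_properties(
        analytics_logging=BlobAnalyticsLogging(
            version="1.0", delete=True, read=True, write=True, retention_policy=retention),
        hour_metrics=Metrics(enabled=True, include_apis=True, retention_policy=retention),
        minute_metrics=Metrics(enabled=False),
        delete_retention_policy=retention,
        static_website=StaticWebsite(enabled=True, index_document="index.html"))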
+ :type value: str + """ + + _validation = { + 'signed_oid': {'required': True}, + 'signed_tid': {'required': True}, + 'signed_start': {'required': True}, + 'signed_expiry': {'required': True}, + 'signed_service': {'required': True}, + 'signed_version': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'signed_oid': {'key': 'SignedOid', 'type': 'str'}, + 'signed_tid': {'key': 'SignedTid', 'type': 'str'}, + 'signed_start': {'key': 'SignedStart', 'type': 'iso-8601'}, + 'signed_expiry': {'key': 'SignedExpiry', 'type': 'iso-8601'}, + 'signed_service': {'key': 'SignedService', 'type': 'str'}, + 'signed_version': {'key': 'SignedVersion', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'str'}, + } + + def __init__( + self, + *, + signed_oid: str, + signed_tid: str, + signed_start: datetime.datetime, + signed_expiry: datetime.datetime, + signed_service: str, + signed_version: str, + value: str, + **kwargs + ): + super(UserDelegationKey, self).__init__(**kwargs) + self.signed_oid = signed_oid + self.signed_tid = signed_tid + self.signed_start = signed_start + self.signed_expiry = signed_expiry + self.signed_service = signed_service + self.signed_version = signed_version + self.value = value diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/__init__.py new file mode 100644 index 0000000..62f85c9 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/__init__.py @@ -0,0 +1,25 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._container_operations import ContainerOperations +from ._directory_operations import DirectoryOperations +from ._blob_operations import BlobOperations +from ._page_blob_operations import PageBlobOperations +from ._append_blob_operations import AppendBlobOperations +from ._block_blob_operations import BlockBlobOperations + +__all__ = [ + 'ServiceOperations', + 'ContainerOperations', + 'DirectoryOperations', + 'BlobOperations', + 'PageBlobOperations', + 'AppendBlobOperations', + 'BlockBlobOperations', +] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_append_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_append_blob_operations.py new file mode 100644 index 0000000..b38af4b --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_append_blob_operations.py @@ -0,0 +1,734 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
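UserDelegationKey above is returned by the Get User Delegation Key operation (whose request body is the KeyInfo model defined earlier) and feeds identity-based SAS generation. A sketch with hypothetical account, container, and blob names::

    from datetime import datetime, timedelta, timezone

    from azure.identity import DefaultAzureCredential
    from azure.storage.blob import BlobSasPermissions, BlobServiceClient, generate_blob_sas

    service = BlobServiceClient(
        "https://myaccount.blob.core.windows.net",  # hypothetical account
        credential=DefaultAzureCredential())

    now = datetime.now(timezone.utc)
    key = service.get_user_delegation_key(now, now + timedelta(hours=1))
    sas = generate_blob_sas(
        account_name="myaccount", container_name="mycontainer", blob_name="data.bin",
        user_delegation_key=key,
        permission=BlobSasPermissions(read=True),
        expiry=now + timedelta(hours=1))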
+# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class AppendBlobOperations(object): + """AppendBlobOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create( + self, + content_length, # type: int + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + blob_tags_string=None, # type: Optional[str] + immutability_policy_expiry=None, # type: Optional[datetime.datetime] + immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] + legal_hold=None, # type: Optional[bool] + blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Create Append Blob operation creates a new append blob. + + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. 
Used to set blob tags in various blob operations.
+ :type blob_tags_string: str
+ :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy
+ is set to expire.
+ :type immutability_policy_expiry: ~datetime.datetime
+ :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+ :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+ :param legal_hold: Specifies whether a legal hold should be set on the blob.
+ :type legal_hold: bool
+ :param blob_http_headers: Parameter group.
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Parameter group.
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Parameter group.
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _blob_content_type = None
+ _blob_content_encoding = None
+ _blob_content_language = None
+ _blob_content_md5 = None
+ _blob_cache_control = None
+ _lease_id = None
+ _blob_content_disposition = None
+ _encryption_key = None
+ _encryption_key_sha256 = None
+ _encryption_algorithm = None
+ _encryption_scope = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ _if_match = None
+ _if_none_match = None
+ _if_tags = None
+ if blob_http_headers is not None:
+ _blob_content_type = blob_http_headers.blob_content_type
+ _blob_content_encoding = blob_http_headers.blob_content_encoding
+ _blob_content_language = blob_http_headers.blob_content_language
+ _blob_content_md5 = blob_http_headers.blob_content_md5
+ _blob_cache_control = blob_http_headers.blob_cache_control
+ _blob_content_disposition = blob_http_headers.blob_content_disposition
+ if cpk_info is not None:
+ _encryption_key = cpk_info.encryption_key
+ _encryption_key_sha256 = cpk_info.encryption_key_sha256
+ _encryption_algorithm = cpk_info.encryption_algorithm
+ if cpk_scope_info is not None:
+ _encryption_scope = cpk_scope_info.encryption_scope
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_tags = modified_access_conditions.if_tags
+ blob_type = "AppendBlob"
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.create.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = 
self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + 
header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str')
+ if legal_hold is not None:
+ header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+ response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
+ response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore
+
+ def append_block(
+ self,
+ content_length, # type: int
+ body, # type: IO
+ timeout=None, # type: Optional[int]
+ transactional_content_md5=None, # type: Optional[bytearray]
+ transactional_content_crc64=None, # type: Optional[bytearray]
+ request_id_parameter=None, # type: Optional[str]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"]
+ cpk_info=None, # type: Optional["_models.CpkInfo"]
+ cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """The Append Block operation commits a new block of data to the end of an existing append blob.
+ The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to
+ AppendBlob. Append Block is supported only on version 2015-02-21 or later.
+
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param body: Initial data.
+ :type body: IO
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Parameter group. + :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _max_size = None + _append_position = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if append_position_access_conditions is not None: + _max_size = append_position_access_conditions.max_size + _append_position = append_position_access_conditions.append_position + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "appendblock" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.append_block.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + 
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) 
+ response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset'))
+ response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count'))
+ response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+ response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
+ response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ append_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore
+
+ def append_block_from_url(
+ self,
+ source_url, # type: str
+ content_length, # type: int
+ source_range=None, # type: Optional[str]
+ source_content_md5=None, # type: Optional[bytearray]
+ source_contentcrc64=None, # type: Optional[bytearray]
+ timeout=None, # type: Optional[int]
+ transactional_content_md5=None, # type: Optional[bytearray]
+ request_id_parameter=None, # type: Optional[str]
+ copy_source_authorization=None, # type: Optional[str]
+ cpk_info=None, # type: Optional["_models.CpkInfo"]
+ cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """The Append Block operation commits a new block of data to the end of an existing append blob
+ where the contents are read from a source url. The Append Block operation is permitted only if
+ the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on
+ version 2015-02-21 or later.
+
+ :param source_url: Specify a URL to the copy source.
+ :type source_url: str
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param source_range: Bytes of source data in the specified range.
+ :type source_range: str
+ :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+ from the copy source.
+ :type source_content_md5: bytearray
+ :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be
+ read from the copy source.
+ :type source_contentcrc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+ by the service.
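The create and append_block operations above are normally driven through the public BlobClient rather than called directly. A minimal sketch, with hypothetical names; the response dict is assumed to carry the normalized append offset and committed block count from the headers deserialized above::

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_connection_string(
        "<connection-string>",  # hypothetical
        container_name="mycontainer", blob_name="app.log")

    blob.create_append_blob()                  # PUT with x-ms-blob-type: AppendBlob
    resp = blob.append_block(b"first line\n")  # comp=appendblock
    print(resp.get("blob_append_offset"), resp.get("blob_committed_block_count"))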
+ :type transactional_content_md5: bytearray + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. + :type copy_source_authorization: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Parameter group. + :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _max_size = None + _append_position = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if append_position_access_conditions is not None: + _max_size = append_position_access_conditions.max_size + _append_position = append_position_access_conditions.append_position + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "appendblock" + accept = "application/xml" + + # Construct URL + url = self.append_block_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': 
self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _max_size is not None: + header_parameters['x-ms-blob-condition-maxsize'] = self._serialize.header("max_size", _max_size, 'long') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = 
self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if copy_source_authorization is not None: + header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-append-offset']=self._deserialize('str', response.headers.get('x-ms-blob-append-offset')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, response_headers) + + append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def seal( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + append_position_access_conditions=None, # type: Optional["_models.AppendPositionAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on + version 2019-12-12 version or later. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param append_position_access_conditions: Parameter group. + :type append_position_access_conditions: ~azure.storage.blob.models.AppendPositionAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _append_position = None + if append_position_access_conditions is not None: + _append_position = append_position_access_conditions.append_position + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + comp = "seal" + accept = "application/xml" + + # Construct URL + url = self.seal.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", _append_position, 'long') + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + + if cls: + return cls(pipeline_response, None, response_headers) + + seal.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_blob_operations.py new file mode 100644 index 0000000..9b60a81 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_blob_operations.py @@ -0,0 +1,3456 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class BlobOperations(object): + """BlobOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
+ """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def download( + self, + snapshot=None, # type: Optional[str] + version_id=None, # type: Optional[str] + timeout=None, # type: Optional[int] + range=None, # type: Optional[str] + range_get_content_md5=None, # type: Optional[bool] + range_get_content_crc64=None, # type: Optional[bool] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> IO + """The Download operation reads or downloads a blob from the system, including its metadata and + properties. You can also call Download to read a snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param range_get_content_md5: When set to true and specified together with the Range, the + service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB + in size. + :type range_get_content_md5: bool + :param range_get_content_crc64: When set to true and specified together with the Range, the + service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 + MB in size. + :type range_get_content_crc64: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.download.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if range_get_content_crc64 is not None: + header_parameters['x-ms-range-get-content-crc64'] = self._serialize.header("range_get_content_crc64", range_get_content_crc64, 'bool') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + 
header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + 
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) + response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) + response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) + response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', 
response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) + response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) + response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) + response_headers['x-ms-legal-hold']=self._deserialize('bool', 
response.headers.get('x-ms-legal-hold')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + download.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def get_properties( + self, + snapshot=None, # type: Optional[str] + version_id=None, # type: Optional[str] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. 
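+
+ .. admonition:: Example (editor's sketch, not generated code)
+
+ The result is returned entirely in response headers, so a ``cls``
+ callback is the hypothetical way to capture them::
+
+     headers = client.blob.get_properties(
+         cls=lambda response, body, headers: headers)
+     etag = headers['ETag']
+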
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if 
request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-creation-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-or-policy-id']=self._deserialize('str', response.headers.get('x-ms-or-policy-id')) + response_headers['x-ms-or']=self._deserialize('str', response.headers.get('x-ms-or')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-incremental-copy']=self._deserialize('bool', response.headers.get('x-ms-incremental-copy')) + response_headers['x-ms-copy-destination-snapshot']=self._deserialize('str', response.headers.get('x-ms-copy-destination-snapshot')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) + response_headers['x-ms-access-tier-inferred']=self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')) + response_headers['x-ms-archive-status']=self._deserialize('str', response.headers.get('x-ms-archive-status')) + response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['x-ms-is-current-version']=self._deserialize('bool', response.headers.get('x-ms-is-current-version')) + response_headers['x-ms-tag-count']=self._deserialize('long', response.headers.get('x-ms-tag-count')) + response_headers['x-ms-expiry-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')) + response_headers['x-ms-blob-sealed']=self._deserialize('bool', response.headers.get('x-ms-blob-sealed')) + response_headers['x-ms-rehydrate-priority']=self._deserialize('str', response.headers.get('x-ms-rehydrate-priority')) + response_headers['x-ms-last-access-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-last-access-time')) + response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) + response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) + response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def delete( + self, + snapshot=None, # type: Optional[str] + version_id=None, # type: Optional[str] + timeout=None, # type: Optional[int] + delete_snapshots=None, # type: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] + request_id_parameter=None, # type: Optional[str] + blob_delete_type="Permanent", # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is + permanently removed from the storage account. 
If the storage account's soft delete feature is
+ enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible
+ immediately. However, the blob service retains the blob or snapshot for the number of days
+ specified by the DeleteRetentionPolicy section of [Storage service properties]
+ (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's
+ data is permanently removed from the storage account. Note that you continue to be charged for
+ the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and
+ specify the "include=deleted" query parameter to discover which blobs and snapshots have been
+ soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other
+ operations on a soft-deleted blob or snapshot cause the service to return an HTTP status code
+ of 404 (ResourceNotFound).
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+ specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+ see :code:`Creating
+ a Snapshot of a Blob.`.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime value that, when present,
+ specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the
+ following two options: include: Delete the base blob and all of its snapshots. only: Delete
+ only the blob's snapshots and not the blob itself.
+ :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to
+ permanently delete a blob if blob soft delete is enabled.
+ :type blob_delete_type: str
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
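+
+ .. admonition:: Example (editor's sketch, not generated code)
+
+ A hypothetical delete that also removes snapshots; with soft delete
+ enabled, the blob is only marked for deletion, as described above::
+
+     client.blob.delete(delete_snapshots="include")
+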
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if blob_delete_type is not None: + query_parameters['deletetype'] = self._serialize.query("blob_delete_type", blob_delete_type, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: 
+ map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def set_access_control( + self, + timeout=None, # type: Optional[int] + owner=None, # type: Optional[str] + group=None, # type: Optional[str] + posix_permissions=None, # type: Optional[str] + posix_acl=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Set the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and directories. The value is a + comma-separated list of access control entries. Each access control entry (ACE) consists of a + scope, a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
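+
+ .. admonition:: Example (editor's sketch, not generated code)
+
+ A hypothetical ACL update for an HNS-enabled account; the ACE string
+ follows the "[scope:][type]:[id]:[permissions]" format described above::
+
+     client.blob.set_access_control(
+         posix_acl="user::rwx,group::r-x,other::---")
+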
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "setAccessControl" + accept = "application/xml" + + # Construct URL + url = self.set_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def get_access_control( + self, + timeout=None, # type: Optional[int] + upn=None, # type: Optional[bool] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Get the owner, group, permissions, or access control list for a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If + "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response + headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If + "false", the values will be returned as Azure Active Directory Object IDs. The default value is + false. + :type upn: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
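+
+ .. admonition:: Example (editor's sketch, not generated code)
+
+ Results arrive as response headers (x-ms-owner, x-ms-group, x-ms-acl),
+ so a ``cls`` callback is the hypothetical way to read them::
+
+     headers = client.blob.get_access_control(
+         upn=True, cls=lambda response, body, headers: headers)
+     acl = headers['x-ms-acl']
+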
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "getAccessControl" + accept = "application/xml" + + # Construct URL + url = self.get_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
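+        # The remaining headers carry the POSIX access-control values themselves
+        # (x-ms-owner, x-ms-group, x-ms-permissions, x-ms-acl); they are deserialized
+        # below and surfaced to the caller through the optional ``cls`` callback.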
+        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+        response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner'))
+        response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group'))
+        response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions'))
+        response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    get_access_control.metadata = {'url': '/{filesystem}/{path}'}  # type: ignore
+
+    def rename(
+        self,
+        rename_source,  # type: str
+        timeout=None,  # type: Optional[int]
+        path_rename_mode=None,  # type: Optional[Union[str, "_models.PathRenameMode"]]
+        directory_properties=None,  # type: Optional[str]
+        posix_permissions=None,  # type: Optional[str]
+        posix_umask=None,  # type: Optional[str]
+        source_lease_id=None,  # type: Optional[str]
+        request_id_parameter=None,  # type: Optional[str]
+        directory_http_headers=None,  # type: Optional["_models.DirectoryHttpHeaders"]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        source_modified_access_conditions=None,  # type: Optional["_models.SourceModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """Rename a blob/file. By default, the destination is overwritten; if the destination already
+        exists and has a lease, the lease is broken. This operation supports conditional HTTP requests.
+        For more information, see Specifying Conditional Headers for Blob Service Operations.
+        To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+
+        :param rename_source: The file or directory to be renamed. The value must have the following
+         format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties will
+         overwrite the existing properties; otherwise, the existing properties will be preserved.
+        :type rename_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+            :code:`Setting
+            Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param path_rename_mode: Determines the behavior of the rename operation.
+        :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode
+        :param directory_properties: Optional. User-defined properties to be stored with the file or
+         directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2,
+         ...", where each value is base64 encoded.
+        :type directory_properties: str
+        :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission. The sticky bit is also
+         supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+        :type posix_permissions: str
+        :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account.
+         This umask restricts permission settings for the file or directory, and is applied only
+         when a default ACL does not exist in the parent directory. If a bit in the umask is set,
+         the corresponding permission is disabled; otherwise the corresponding permission is
+         determined by posix_permissions. A 4-digit octal notation (e.g. 0022) is supported here.
+         If no umask is specified, a default umask of 0027 will be used.
+        :type posix_umask: str
+        :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+         an active lease and the lease ID must match.
+        :type source_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param directory_http_headers: Parameter group.
+        :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param source_modified_access_conditions: Parameter group.
+        :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None, or the result of cls(response)
+        :rtype: None
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+
+        _cache_control = None
+        _content_type = None
+        _content_encoding = None
+        _content_language = None
+        _content_disposition = None
+        _lease_id = None
+        _if_modified_since = None
+        _if_unmodified_since = None
+        _if_match = None
+        _if_none_match = None
+        _source_if_modified_since = None
+        _source_if_unmodified_since = None
+        _source_if_match = None
+        _source_if_none_match = None
+        if directory_http_headers is not None:
+            _cache_control = directory_http_headers.cache_control
+            _content_type = directory_http_headers.content_type
+            _content_encoding = directory_http_headers.content_encoding
+            _content_language = directory_http_headers.content_language
+            _content_disposition = directory_http_headers.content_disposition
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        if modified_access_conditions is not None:
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+            _if_match = modified_access_conditions.if_match
+            _if_none_match = modified_access_conditions.if_none_match
+        if source_modified_access_conditions is not None:
+            _source_if_modified_since = source_modified_access_conditions.source_if_modified_since
+            _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since
+            _source_if_match = source_modified_access_conditions.source_if_match
+            _source_if_none_match = source_modified_access_conditions.source_if_none_match
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.rename.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str',
skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", 
request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def undelete( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Undelete a blob that was previously soft deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
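+         The value is forwarded as the ``x-ms-client-request-id`` request header and echoed
+         back in the response headers, which makes it useful for correlating client-side
+         calls with server-side analytics logs.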
+        :type request_id_parameter: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None, or the result of cls(response)
+        :rtype: None
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        comp = "undelete"
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.undelete.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id_parameter is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    undelete.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    def set_expiry(
+        self,
+        expiry_options,  # type: Union[str, "_models.BlobExpiryOptions"]
+        timeout=None,  # type: Optional[int]
+        request_id_parameter=None,  # type: Optional[str]
+        expires_on=None,  # type: Optional[str]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """Sets the time a blob will expire and be deleted.
+
+        :param expiry_options: Required. Indicates the mode of the expiry time.
+        :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+            :code:`Setting
+            Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param expires_on: The time to set the blob to expire.
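+         The value is sent as the ``x-ms-expiry-time`` header and is interpreted according
+         to ``expiry_options`` (per the service documentation, an RFC-1123 timestamp for an
+         absolute expiry, or a duration in milliseconds for the relative modes; nothing in
+         this generated code enforces the format).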
+ :type expires_on: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "expiry" + accept = "application/xml" + + # Construct URL + url = self.set_expiry.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') + if expires_on is not None: + header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_expiry.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def set_http_headers( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Set HTTP Headers operation sets system properties on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
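+         The value is passed through as the ``timeout`` query parameter; client-side, the
+         serializer only enforces that it is non-negative (``minimum=0``).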
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_cache_control = None + _blob_content_type = None + _blob_content_md5 = None + _blob_content_encoding = None + _blob_content_language = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _blob_content_disposition = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_disposition = blob_http_headers.blob_content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.set_http_headers.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language 
is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_http_headers.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def set_immutability_policy( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + immutability_policy_expiry=None, # type: Optional[datetime.datetime] + immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Set Immutability Policy operation sets the immutability policy on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param immutability_policy_expiry: Specifies the date and time when the blob's immutability
+         policy is set to expire.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param modified_access_conditions: Parameter group.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None, or the result of cls(response)
+        :rtype: None
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+
+        _if_unmodified_since = None
+        if modified_access_conditions is not None:
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        comp = "immutabilityPolicies"
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.set_immutability_policy.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id_parameter is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+        if _if_unmodified_since is not None:
+            header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123')
+        if immutability_policy_expiry is not None:
+            header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123')
+        if immutability_policy_mode is not None:
+            header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
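+        # The service echoes the effective policy back: the x-ms-immutability-policy-until-date
+        # and x-ms-immutability-policy-mode headers deserialized below reflect what was
+        # actually persisted.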
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-immutability-policy-until-date']=self._deserialize('rfc-1123', response.headers.get('x-ms-immutability-policy-until-date')) + response_headers['x-ms-immutability-policy-mode']=self._deserialize('str', response.headers.get('x-ms-immutability-policy-mode')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_immutability_policy.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def delete_immutability_policy( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """The Delete Immutability Policy operation deletes the immutability policy on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "immutabilityPolicies" + accept = "application/xml" + + # Construct URL + url = self.delete_immutability_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    delete_immutability_policy.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    def set_legal_hold(
+        self,
+        legal_hold,  # type: bool
+        timeout=None,  # type: Optional[int]
+        request_id_parameter=None,  # type: Optional[str]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Set Legal Hold operation sets a legal hold on the blob.
+
+        :param legal_hold: Specifies whether a legal hold should be set on the blob.
+        :type legal_hold: bool
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+            :code:`Setting
+            Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None, or the result of cls(response)
+        :rtype: None
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        comp = "legalhold"
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.set_legal_hold.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id_parameter is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+        header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123',
response.headers.get('Date')) + response_headers['x-ms-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-legal-hold')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_legal_hold.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def set_metadata( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or + more name-value pairs. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. 
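+         In addition to the usual If-Match / If-None-Match and If-Modified-Since /
+         If-Unmodified-Since conditions, this operation also honors ``if_tags`` (sent as
+         the x-ms-if-tags header), so the update can be made conditional on a blob tag
+         expression.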
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 
'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def acquire_lease( + self, + timeout=None, # type: Optional[int] + duration=None, # type: Optional[int] + proposed_lease_id=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "acquire" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + 
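+            # failsafe_deserialize returns None rather than raising when the error body
+            # cannot be parsed, so the HttpResponseError below is raised either way.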
raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def release_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "release" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + 
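+        # Unlike acquire/renew, a successful release returns no x-ms-lease-id header;
+        # only the standard request-tracking headers below are populated.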
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    release_lease.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
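+    # acquire/release/renew/change/break all target the same comp=lease PUT;
+    # only x-ms-lease-action and the action-specific headers differ. Renewal
+    # (next) requires the current lease ID and, per the service docs, can
+    # succeed even on an expired lease as long as the blob has not been
+    # leased again since.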
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def change_lease( + self, + lease_id, # type: str + proposed_lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "change" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', 
response.headers.get('ETag'))
+        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    change_lease.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    def break_lease(
+        self,
+        timeout=None,  # type: Optional[int]
+        break_period=None,  # type: Optional[int]
+        request_id_parameter=None,  # type: Optional[str]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and
+        delete operations.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         "Setting Timeouts for Blob Service Operations".
+        :type timeout: int
+        :param break_period: For a break operation, proposed duration the lease should continue before
+         it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter
+         than the time remaining on the lease. If longer, the time remaining on the lease is used. A
+         new lease will not be available before the break period has expired, but the lease may be
+         held for longer than the break period. If this header does not appear with a break operation,
+         a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease
+         breaks immediately.
+        :type break_period: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "lease" + action = "break" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified'))
+        response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time'))
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    break_lease.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    def create_snapshot(
+        self,
+        timeout=None,  # type: Optional[int]
+        metadata=None,  # type: Optional[str]
+        request_id_parameter=None,  # type: Optional[str]
+        cpk_info=None,  # type: Optional["_models.CpkInfo"]
+        cpk_scope_info=None,  # type: Optional["_models.CpkScopeInfo"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Create Snapshot operation creates a read-only snapshot of a blob.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         "Setting Timeouts for Blob Service Operations".
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source
+         blob or file to the destination blob. If one or more name-value pairs are specified, the
+         destination blob is created with the specified metadata, and metadata is not copied from the
+         source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere
+         to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
+         Metadata for more information.
+        :type metadata: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param cpk_info: Parameter group.
+        :type cpk_info: ~azure.storage.blob.models.CpkInfo
+        :param cpk_scope_info: Parameter group.
+        :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+        :param modified_access_conditions: Parameter group.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "snapshot" + accept = "application/xml" + + # Construct URL + url = self.create_snapshot.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if 
_lease_id is not None:
+            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id_parameter is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot'))
+        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+        response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    create_snapshot.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
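+    # A snapshot is read-only once created; the x-ms-snapshot header
+    # deserialized above carries the opaque DateTime value that identifies it
+    # and can be passed back through the snapshot query parameter of other
+    # operations (see set_tier and query below).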
+    def start_copy_from_url(
+        self,
+        copy_source,  # type: str
+        timeout=None,  # type: Optional[int]
+        metadata=None,  # type: Optional[str]
+        tier=None,  # type: Optional[Union[str, "_models.AccessTierOptional"]]
+        rehydrate_priority=None,  # type: Optional[Union[str, "_models.RehydratePriority"]]
+        request_id_parameter=None,  # type: Optional[str]
+        blob_tags_string=None,  # type: Optional[str]
+        seal_blob=None,  # type: Optional[bool]
+        immutability_policy_expiry=None,  # type: Optional[datetime.datetime]
+        immutability_policy_mode=None,  # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]]
+        legal_hold=None,  # type: Optional[bool]
+        source_modified_access_conditions=None,  # type: Optional["_models.SourceModifiedAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Start Copy From URL operation copies a blob or an internet resource to a new blob.
+
+        :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL
+         of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded
+         as it would appear in a request URI. The source blob must either be public or must be
+         authenticated via a shared access signature.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         "Setting Timeouts for Blob Service Operations".
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source
+         blob or file to the destination blob. If one or more name-value pairs are specified, the
+         destination blob is created with the specified metadata, and metadata is not copied from the
+         source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere
+         to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
+         Metadata for more information.
+        :type metadata: str
+        :param tier: Optional. Indicates the tier to be set on the blob.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an
+         archived blob.
+        :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations.
+        :type blob_tags_string: str
+        :param seal_blob: Overrides the sealed state of the destination blob. Service version
+         2019-12-12 and newer.
+        :type seal_blob: bool
+        :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy
+         is set to expire.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specified if a legal hold should be set on the blob.
+        :type legal_hold: bool
+        :param source_modified_access_conditions: Parameter group.
+        :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions
+        :param modified_access_conditions: Parameter group.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + accept = "application/xml" + + # Construct URL + url = self.start_copy_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _source_if_tags is not None: + header_parameters['x-ms-source-if-tags'] = self._serialize.header("source_if_tags", _source_if_tags, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if seal_blob is not None: + header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + if legal_hold is not None: + header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def copy_from_url( + self, + copy_source, # type: 
str
+        timeout=None,  # type: Optional[int]
+        metadata=None,  # type: Optional[str]
+        tier=None,  # type: Optional[Union[str, "_models.AccessTierOptional"]]
+        request_id_parameter=None,  # type: Optional[str]
+        source_content_md5=None,  # type: Optional[bytearray]
+        blob_tags_string=None,  # type: Optional[str]
+        immutability_policy_expiry=None,  # type: Optional[datetime.datetime]
+        immutability_policy_mode=None,  # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]]
+        legal_hold=None,  # type: Optional[bool]
+        copy_source_authorization=None,  # type: Optional[str]
+        source_modified_access_conditions=None,  # type: Optional["_models.SourceModifiedAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Copy From URL operation copies a blob or an internet resource to a new blob. It will
+        not return a response until the copy is complete.
+
+        :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL
+         of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded
+         as it would appear in a request URI. The source blob must either be public or must be
+         authenticated via a shared access signature.
+        :type copy_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         "Setting Timeouts for Blob Service Operations".
+        :type timeout: int
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source
+         blob or file to the destination blob. If one or more name-value pairs are specified, the
+         destination blob is created with the specified metadata, and metadata is not copied from the
+         source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere
+         to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
+         Metadata for more information.
+        :type metadata: str
+        :param tier: Optional. Indicates the tier to be set on the blob.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param source_content_md5: Specify the md5 calculated for the range of bytes that must be
+         read from the copy source.
+        :type source_content_md5: bytearray
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy
+         is set to expire.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specified if a legal hold should be set on the blob.
+        :type legal_hold: bool
+        :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+         OAuth access token to copy source.
+        :type copy_source_authorization: str
+        :param source_modified_access_conditions: Parameter group.
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + x_ms_requires_sync = "true" + accept = "application/xml" + + # Construct URL + url = self.copy_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-requires-sync'] = self._serialize.header("x_ms_requires_sync", x_ms_requires_sync, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _if_modified_since is not None: + 
header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + if legal_hold is not None: + header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') + if copy_source_authorization is not None: + header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + 
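+        # Because this is the synchronous Copy From URL operation (see the
+        # x-ms-requires-sync header set above), x-ms-copy-status deserialized
+        # next is expected to be "success" rather than the "pending" returned
+        # by start_copy_from_url.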
+        response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status'))
+        response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5'))
+        response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    copy_from_url.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
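+    # Aborting is only meaningful for the asynchronous start_copy_from_url
+    # above: the service expects a PUT with comp=copy&copyid=<id> plus the
+    # x-ms-copy-action: abort header, answers 204, and leaves a zero-length
+    # destination blob with full metadata (see the docstring below).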
+    def abort_copy_from_url(
+        self,
+        copy_id,  # type: str
+        timeout=None,  # type: Optional[int]
+        request_id_parameter=None,  # type: Optional[str]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a
+        destination blob with zero length and full metadata.
+
+        :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy
+         Blob operation.
+        :type copy_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         "Setting Timeouts for Blob Service Operations".
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None, or the result of cls(response)
+        :rtype: None
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+
+        _lease_id = None
+        if lease_access_conditions is not None:
+            _lease_id = lease_access_conditions.lease_id
+        comp = "copy"
+        copy_action_abort_constant = "abort"
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.abort_copy_from_url.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+        query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str')
+        if _lease_id is not None:
+            header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        if request_id_parameter is not None:
+            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'}  # type: ignore
+
+    def set_tier(
+        self,
+        tier,  # type: Union[str, "_models.AccessTierRequired"]
+        snapshot=None,  # type: Optional[str]
+        version_id=None,  # type: Optional[str]
+        timeout=None,  # type: Optional[int]
+        rehydrate_priority=None,  # type: Optional[Union[str, "_models.RehydratePriority"]]
+        request_id_parameter=None,  # type: Optional[str]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in
+        a premium storage account and on a block blob in a blob storage account (locally redundant
+        storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of
+        the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does
+        not update the blob's ETag.
+
+        :param tier: Indicates the tier to be set on the blob.
+        :type tier: str or ~azure.storage.blob.models.AccessTierRequired
+        :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+         see "Creating a Snapshot of a Blob".
+        :type snapshot: str
+        :param version_id: The version id parameter is an opaque DateTime value that, when present,
+         specifies the version of the blob to operate on. It's for service version 2019-10-10 and
+         newer.
+        :type version_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         "Setting Timeouts for Blob Service Operations".
+        :type timeout: int
+        :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an
+         archived blob.
+        :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "tier" + accept = "application/xml" + + # Construct URL + url = self.set_tier.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._serialize.header("rehydrate_priority", rehydrate_priority, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if response.status_code == 202: + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
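+            # A 202 on this branch means the tier change is pending (for
+            # example a rehydrate out of Archive); the 200 branch above is
+            # taken when the new tier takes effect immediately.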
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tier.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def get_account_info( + self, + **kwargs # type: Any + ): + # type: (...) -> None + """Returns the sku name and account kind. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "account" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_account_info.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) + response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def query( + self, + snapshot=None, # type: Optional[str] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + query_request=None, # type: Optional["_models.QueryRequest"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> IO + """The Query operation enables users to select/project on blob data by providing simple query + expressions. 
+
+ :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+ specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+ see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/creating-a-snapshot-of-a-blob">Creating
+ a Snapshot of a Blob.</a>`.
+ :type snapshot: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations">Setting
+ Timeouts for Blob Service Operations.</a>`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param query_request: the query request.
+ :type query_request: ~azure.storage.blob.models.QueryRequest
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Parameter group.
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: IO, or the result of cls(response)
+ :rtype: IO
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[IO]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _encryption_key = None
+ _encryption_key_sha256 = None
+ _encryption_algorithm = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ _if_match = None
+ _if_none_match = None
+ _if_tags = None
+ if cpk_info is not None:
+ _encryption_key = cpk_info.encryption_key
+ _encryption_key_sha256 = cpk_info.encryption_key_sha256
+ _encryption_algorithm = cpk_info.encryption_algorithm
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_tags = modified_access_conditions.if_tags
+ comp = "query"
+ content_type = kwargs.pop("content_type", "application/xml")
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.query.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ if _encryption_key is not None:
+ header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str')
+ if _encryption_key_sha256 is not None:
+ header_parameters['x-ms-encryption-key-sha256'] = 
self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if query_request is not None: + body_content = self._serialize.body(query_request, 'QueryRequest', is_xml=True) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', response.headers.get('x-ms-blob-type')) + 
response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-blob-type']=self._deserialize('str', 
response.headers.get('x-ms-blob-type'))
+ response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64'))
+ response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time'))
+ response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description'))
+ response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id'))
+ response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress'))
+ response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source'))
+ response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status'))
+ response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration'))
+ response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state'))
+ response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['x-ms-blob-committed-block-count']=self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count'))
+ response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted'))
+ response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256'))
+ response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope'))
+ response_headers['x-ms-blob-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5'))
+ deserialized = response.stream_download(self._client._pipeline)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ query.metadata = {'url': '/{containerName}/{blob}'} # type: ignore
+
+ def get_tags(
+ self,
+ timeout=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ snapshot=None, # type: Optional[str]
+ version_id=None, # type: Optional[str]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> "_models.BlobTags"
+ """The Get Tags operation enables users to get the tags associated with a blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations">Setting
+ Timeouts for Blob Service Operations.</a>`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
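+
+ A hedged sketch of calling this operation; ``client`` is assumed to be an
+ already-constructed generated ``AzureBlobStorage`` client scoped to the target
+ blob (construction and credentials elided)::
+
+     blob_tags = client.blob.get_tags()       # returns a BlobTags model
+     for tag in blob_tags.blob_tag_set:       # BlobTags wraps a list of BlobTag
+         print(tag.key, tag.value)
+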
+ :param snapshot: The snapshot parameter is an opaque DateTime value that, when present,
+ specifies the blob snapshot to retrieve. For more information on working with blob snapshots,
+ see :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/creating-a-snapshot-of-a-blob">Creating
+ a Snapshot of a Blob.</a>`.
+ :type snapshot: str
+ :param version_id: The version id parameter is an opaque DateTime value that, when present,
+ specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: BlobTags, or the result of cls(response)
+ :rtype: ~azure.storage.blob.models.BlobTags
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobTags"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _if_tags = None
+ _lease_id = None
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_tags = modified_access_conditions.if_tags
+ comp = "tags"
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.get_tags.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ if snapshot is not None:
+ query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str')
+ if version_id is not None:
+ query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ if _if_tags is not None:
+ header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str')
+ if _lease_id is not None:
+ header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ deserialized = self._deserialize('BlobTags', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ get_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore
+
+ def set_tags(
+ self,
+ timeout=None, # type: Optional[int]
+ version_id=None, # type: Optional[str]
+ transactional_content_md5=None, # type: Optional[bytearray]
+ transactional_content_crc64=None, # type: Optional[bytearray]
+ request_id_parameter=None, # type: Optional[str]
+ tags=None, # type: Optional["_models.BlobTags"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """The Set Tags operation enables users to set tags on a blob.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations">Setting
+ Timeouts for Blob Service Operations.</a>`.
+ :type timeout: int
+ :param version_id: The version id parameter is an opaque DateTime value that, when present,
+ specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+ :type version_id: str
+ :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+ by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+ validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param tags: Blob tags.
+ :type tags: ~azure.storage.blob.models.BlobTags
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+ :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_tags = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "tags" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_tags.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + if tags is not None: + body_content = self._serialize.body(tags, 'BlobTags', is_xml=True) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tags.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_block_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_block_blob_operations.py new file mode 100644 index 0000000..20a4f54 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_block_blob_operations.py @@ -0,0 +1,1148 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class BlockBlobOperations(object): + """BlockBlobOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def upload( + self, + content_length, # type: int + body, # type: IO + timeout=None, # type: Optional[int] + transactional_content_md5=None, # type: Optional[bytearray] + metadata=None, # type: Optional[str] + tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] + request_id_parameter=None, # type: Optional[str] + blob_tags_string=None, # type: Optional[str] + immutability_policy_expiry=None, # type: Optional[datetime.datetime] + immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] + legal_hold=None, # type: Optional[bool] + blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) 
-> None
+ """The Upload Block Blob operation updates the content of an existing block blob. Updating an
+ existing block blob overwrites any existing metadata on the blob. Partial updates are not
+ supported with Put Blob; the content of the existing blob is overwritten with the content of
+ the new blob. To perform a partial update of the content of a block blob, use the Put Block
+ List operation.
+
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param body: Initial data.
+ :type body: IO
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations">Setting
+ Timeouts for Blob Service Operations.</a>`.
+ :type timeout: int
+ :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+ by the service.
+ :type transactional_content_md5: bytearray
+ :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+ If no name-value pairs are specified, the operation will copy the metadata from the source blob
+ or file to the destination blob. If one or more name-value pairs are specified, the destination
+ blob is created with the specified metadata, and metadata is not copied from the source blob or
+ file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+ rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+ information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param blob_tags_string: Optional. Used to set blob tags in various blob operations.
+ :type blob_tags_string: str
+ :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy
+ is set to expire.
+ :type immutability_policy_expiry: ~datetime.datetime
+ :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+ :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+ :param legal_hold: Specified if a legal hold should be set on the blob.
+ :type legal_hold: bool
+ :param blob_http_headers: Parameter group.
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Parameter group.
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Parameter group.
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
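+
+ A minimal upload sketch (illustrative; ``client`` is an assumed pre-built
+ generated ``AzureBlobStorage`` client, and authentication and error handling
+ are elided)::
+
+     import io
+
+     payload = io.BytesIO(b"hello, block blob")          # 17 bytes of content
+     client.block_blob.upload(content_length=17, body=payload)
+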
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _blob_content_type = None
+ _blob_content_encoding = None
+ _blob_content_language = None
+ _blob_content_md5 = None
+ _blob_cache_control = None
+ _lease_id = None
+ _blob_content_disposition = None
+ _encryption_key = None
+ _encryption_key_sha256 = None
+ _encryption_algorithm = None
+ _encryption_scope = None
+ _if_modified_since = None
+ _if_unmodified_since = None
+ _if_match = None
+ _if_none_match = None
+ _if_tags = None
+ if blob_http_headers is not None:
+ _blob_content_type = blob_http_headers.blob_content_type
+ _blob_content_encoding = blob_http_headers.blob_content_encoding
+ _blob_content_language = blob_http_headers.blob_content_language
+ _blob_content_md5 = blob_http_headers.blob_content_md5
+ _blob_cache_control = blob_http_headers.blob_cache_control
+ _blob_content_disposition = blob_http_headers.blob_content_disposition
+ if cpk_info is not None:
+ _encryption_key = cpk_info.encryption_key
+ _encryption_key_sha256 = cpk_info.encryption_key_sha256
+ _encryption_algorithm = cpk_info.encryption_algorithm
+ if cpk_scope_info is not None:
+ _encryption_scope = cpk_scope_info.encryption_scope
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ if modified_access_conditions is not None:
+ _if_modified_since = modified_access_conditions.if_modified_since
+ _if_unmodified_since = modified_access_conditions.if_unmodified_since
+ _if_match = modified_access_conditions.if_match
+ _if_none_match = modified_access_conditions.if_none_match
+ _if_tags = modified_access_conditions.if_tags
+ blob_type = "BlockBlob"
+ content_type = kwargs.pop("content_type", "application/octet-stream")
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.upload.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray')
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if _blob_content_type is not None:
+ header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str')
+ if _blob_content_encoding is not None:
+ header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str')
+ if _blob_content_language is not None:
+ header_parameters['x-ms-blob-content-language'] = 
self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + if legal_hold is not None: + header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + 
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def put_blob_from_url( + self, + content_length, # type: int + copy_source, # type: str + timeout=None, # type: Optional[int] + transactional_content_md5=None, # type: Optional[bytearray] + metadata=None, # type: Optional[str] + tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] + request_id_parameter=None, # type: Optional[str] + source_content_md5=None, # type: Optional[bytearray] + blob_tags_string=None, # type: Optional[str] + copy_source_blob_properties=None, # type: Optional[bool] + copy_source_authorization=None, # type: Optional[str] + blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are + read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial + updates are not supported with Put Blob from URL; the content of an existing blob is + overwritten with the content of the new blob. To perform partial updates to a block blob’s + contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. + + :param content_length: The length of the request. + :type content_length: long + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. 
The value should be URL-encoded as it
+ would appear in a request URI. The source blob must either be public or must be authenticated
+ via a shared access signature.
+ :type copy_source: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations">Setting
+ Timeouts for Blob Service Operations.</a>`.
+ :type timeout: int
+ :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+ by the service.
+ :type transactional_content_md5: bytearray
+ :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+ If no name-value pairs are specified, the operation will copy the metadata from the source blob
+ or file to the destination blob. If one or more name-value pairs are specified, the destination
+ blob is created with the specified metadata, and metadata is not copied from the source blob or
+ file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+ rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+ information.
+ :type metadata: str
+ :param tier: Optional. Indicates the tier to be set on the blob.
+ :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read
+ from the copy source.
+ :type source_content_md5: bytearray
+ :param blob_tags_string: Optional. Used to set blob tags in various blob operations.
+ :type blob_tags_string: str
+ :param copy_source_blob_properties: Optional, default is true. Indicates if properties from
+ the source blob should be copied.
+ :type copy_source_blob_properties: bool
+ :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid
+ OAuth access token to copy source.
+ :type copy_source_authorization: str
+ :param blob_http_headers: Parameter group.
+ :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Parameter group.
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Parameter group.
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+ :param source_modified_access_conditions: Parameter group. 
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + blob_type = "BlockBlob" + accept = "application/xml" + + # Construct URL + url = self.put_blob_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + 
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _source_if_tags is not None: + header_parameters['x-ms-source-if-tags'] = 
self._serialize.header("source_if_tags", _source_if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if copy_source_blob_properties is not None: + header_parameters['x-ms-copy-source-blob-properties'] = self._serialize.header("copy_source_blob_properties", copy_source_blob_properties, 'bool') + if copy_source_authorization is not None: + header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + put_blob_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def stage_block( + self, + block_id, # type: str + content_length, # type: int + body, # type: IO + transactional_content_md5=None, # type: Optional[bytearray] + transactional_content_crc64=None, # type: Optional[bytearray] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + **kwargs # 
type: Any
+ ):
+ # type: (...) -> None
+ """The Stage Block operation creates a new block to be committed as part of a blob.
+
+ :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the
+ string must be less than or equal to 64 bytes in size. For a given blob, the length of the
+ value specified for the blockid parameter must be the same size for each block.
+ :type block_id: str
+ :param content_length: The length of the request.
+ :type content_length: long
+ :param body: Initial data.
+ :type body: IO
+ :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+ by the service.
+ :type transactional_content_md5: bytearray
+ :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+ validated by the service.
+ :type transactional_content_crc64: bytearray
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations">Setting
+ Timeouts for Blob Service Operations.</a>`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+ :param cpk_info: Parameter group.
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo
+ :param cpk_scope_info: Parameter group.
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ _lease_id = None
+ _encryption_key = None
+ _encryption_key_sha256 = None
+ _encryption_algorithm = None
+ _encryption_scope = None
+ if cpk_info is not None:
+ _encryption_key = cpk_info.encryption_key
+ _encryption_key_sha256 = cpk_info.encryption_key_sha256
+ _encryption_algorithm = cpk_info.encryption_algorithm
+ if cpk_scope_info is not None:
+ _encryption_scope = cpk_scope_info.encryption_scope
+ if lease_access_conditions is not None:
+ _lease_id = lease_access_conditions.lease_id
+ comp = "block"
+ content_type = kwargs.pop("content_type", "application/octet-stream")
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.stage_block.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
+ if transactional_content_md5 is not None:
+ header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", 
transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def stage_block_from_url( + self, + block_id, # type: str + content_length, # type: int + source_url, # type: str + source_range=None, # type: Optional[str] + source_content_md5=None, # type: Optional[bytearray] + source_contentcrc64=None, # type: Optional[bytearray] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: 
Optional[str] + copy_source_authorization=None, # type: Optional[str] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Stage Block operation creates a new block to be committed as part of a blob where the + contents are read from a URL. + + :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the + string must be less than or equal to 64 bytes in size. For a given blob, the length of the + value specified for the blockid parameter must be the same size for each block. + :type block_id: str + :param content_length: The length of the request. + :type content_length: long + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. + :type copy_source_authorization: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param source_modified_access_conditions: Parameter group. 
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "block" + accept = "application/xml" + + # Construct URL + url = self.stage_block_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['blockid'] = self._serialize.query("block_id", block_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", 
_encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if copy_source_authorization is not None: + header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def commit_block_list( + self, + blocks, # type: "_models.BlockLookupList" + timeout=None, # type: Optional[int] + transactional_content_md5=None, # type: Optional[bytearray] + transactional_content_crc64=None, # type: Optional[bytearray] + metadata=None, # type: Optional[str] + tier=None, # type: Optional[Union[str, "_models.AccessTierOptional"]] + request_id_parameter=None, # type: Optional[str] + blob_tags_string=None, # type: Optional[str] + immutability_policy_expiry=None, # type: 
Optional[datetime.datetime]
+        immutability_policy_mode=None,  # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]]
+        legal_hold=None,  # type: Optional[bool]
+        blob_http_headers=None,  # type: Optional["_models.BlobHTTPHeaders"]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        cpk_info=None,  # type: Optional["_models.CpkInfo"]
+        cpk_scope_info=None,  # type: Optional["_models.CpkScopeInfo"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """The Commit Block List operation writes a blob by specifying the list of block IDs that make up
+        the blob. In order to be written as part of a blob, a block must have been successfully written
+        to the server in a prior Put Block operation. You can call Put Block List to update a blob by
+        uploading only those blocks that have changed, then committing the new and existing blocks
+        together. You can do this by specifying whether to commit a block from the committed block list
+        or from the uncommitted block list, or to commit the most recently uploaded version of the
+        block, whichever list it may belong to.
+
+        :param blocks: The list of block IDs to commit, grouped into the committed, uncommitted, and
+         latest lists of the BlockLookupList.
+        :type blocks: ~azure.storage.blob.models.BlockLookupList
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting
+         Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param transactional_content_md5: Specify the transactional md5 for the body, to be validated
+         by the service.
+        :type transactional_content_md5: bytearray
+        :param transactional_content_crc64: Specify the transactional crc64 for the body, to be
+         validated by the service.
+        :type transactional_content_crc64: bytearray
+        :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob.
+         If no name-value pairs are specified, the operation will copy the metadata from the source blob
+         or file to the destination blob. If one or more name-value pairs are specified, the destination
+         blob is created with the specified metadata, and metadata is not copied from the source blob or
+         file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
+         rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more
+         information.
+        :type metadata: str
+        :param tier: Optional. Indicates the tier to be set on the blob.
+        :type tier: str or ~azure.storage.blob.models.AccessTierOptional
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param blob_tags_string: Optional. Used to set blob tags in various blob operations.
+        :type blob_tags_string: str
+        :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy
+         is set to expire.
+        :type immutability_policy_expiry: ~datetime.datetime
+        :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob.
+        :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode
+        :param legal_hold: Specifies whether a legal hold should be set on the blob.
+        :type legal_hold: bool
+        :param blob_http_headers: Parameter group.
+        :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders
+        :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_cache_control = None + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "blocklist" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.commit_block_list.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if _blob_content_type is not None: + header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = 
self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + if legal_hold is not None: + header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') + 
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(blocks, 'BlockLookupList', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + commit_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def get_block_list( + self, + snapshot=None, # type: Optional[str] + list_type="committed", # type: Union[str, "_models.BlockListType"] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> "_models.BlockList" + """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a + block blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param list_type: Specifies whether to return the list of committed blocks, the list of + uncommitted blocks, or both lists together. + :type list_type: str or ~azure.storage.blob.models.BlockListType + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BlockList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlockList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.BlockList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + comp = "blocklist" + accept = "application/xml" + + # Construct URL + url = self.get_block_list.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + query_parameters['blocklisttype'] = self._serialize.query("list_type", list_type, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-blob-content-length']=self._deserialize('long', 
response.headers.get('x-ms-blob-content-length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('BlockList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_block_list.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_container_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_container_operations.py new file mode 100644 index 0000000..1fdd911 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_container_operations.py @@ -0,0 +1,1669 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class ContainerOperations(object): + """ContainerOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] + request_id_parameter=None, # type: Optional[str] + container_cpk_scope_info=None, # type: Optional["_models.ContainerCpkScopeInfo"] + **kwargs # type: Any + ): + # type: (...) -> None + """creates a new container under the specified account. If the container with the same name + already exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param access: Specifies whether data in the container may be accessed publicly and the level + of access. + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param container_cpk_scope_info: Parameter group. + :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _default_encryption_scope = None + _prevent_encryption_scope_override = None + if container_cpk_scope_info is not None: + _default_encryption_scope = container_cpk_scope_info.default_encryption_scope + _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override + restype = "container" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if access is not None: + header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if _default_encryption_scope is not None: + header_parameters['x-ms-default-encryption-scope'] = self._serialize.header("default_encryption_scope", _default_encryption_scope, 'str') + if _prevent_encryption_scope_override is not None: + header_parameters['x-ms-deny-encryption-scope-override'] = self._serialize.header("prevent_encryption_scope_override", _prevent_encryption_scope_override, 'bool') + 
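        # A hedged aside: the restype=container query parameter is what routes
+        # this PUT on the container URL to container creation rather than a
+        # blob upload, and the two encryption-scope headers above are unpacked
+        # from the optional ContainerCpkScopeInfo group. The equivalent call
+        # through the hand-written layer looks roughly like this (import path
+        # assumed from this package's layout; conn_str is a placeholder):
+        #
+        #   from azure.multiapi.storagev2.blob.v2020_10_02 import BlobServiceClient
+        #   service = BlobServiceClient.from_connection_string(conn_str)
+        #   service.create_container("logs", metadata={"env": "dev"})
+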
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{containerName}'} # type: ignore + + def get_properties( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """returns all user-defined metadata and system properties for the specified container. The data + returned does not include the container's list of blobs. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "container" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) + response_headers['x-ms-has-immutability-policy']=self._deserialize('bool', response.headers.get('x-ms-has-immutability-policy')) + 
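        # Worth noting: container properties come back entirely as response
+        # headers rather than an XML body, so each value is type-coerced here
+        # ('rfc-1123' yields a datetime, 'bool' a Python bool). The
+        # hand-written ContainerClient folds this header dict into a
+        # ContainerProperties model, e.g.
+        # container_client.get_container_properties().last_modified.
+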
response_headers['x-ms-has-legal-hold']=self._deserialize('bool', response.headers.get('x-ms-has-legal-hold')) + response_headers['x-ms-default-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-default-encryption-scope')) + response_headers['x-ms-deny-encryption-scope-override']=self._deserialize('bool', response.headers.get('x-ms-deny-encryption-scope-override')) + response_headers['x-ms-immutable-storage-with-versioning-enabled']=self._deserialize('bool', response.headers.get('x-ms-immutable-storage-with-versioning-enabled')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{containerName}'} # type: ignore + + def delete( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """operation marks the specified container for deletion. The container and any blobs contained + within it are later deleted during garbage collection. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + restype = "container" + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 
'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{containerName}'} # type: ignore + + def set_metadata( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """operation sets one or more user-defined name-value pairs for the specified container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + restype = "container" + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{containerName}'} # type: ignore + + def get_access_policy( + self, + timeout=None, # type: 
Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> List["_models.SignedIdentifier"] + """gets the permissions for the specified container. The permissions indicate whether container + data may be accessed publicly. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of SignedIdentifier, or the result of cls(response) + :rtype: list[~azure.storage.blob.models.SignedIdentifier] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "container" + comp = "acl" + accept = "application/xml" + + # Construct URL + url = self.get_access_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-blob-public-access']=self._deserialize('str', response.headers.get('x-ms-blob-public-access')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + 
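        # Worth noting: a Get ACL call returns two pieces of state at once.
+        # The container's public-access level rides back in the
+        # x-ms-blob-public-access header captured above, while the stored
+        # access policies arrive in the XML body and are deserialized below
+        # into a list of SignedIdentifier. The hand-written client merges both
+        # into one result, roughly:
+        #
+        #   acl = container_client.get_container_access_policy()
+        #   # -> {'public_access': ..., 'signed_identifiers': [...]}
+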
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_access_policy.metadata = {'url': '/{containerName}'} # type: ignore + + def set_access_policy( + self, + timeout=None, # type: Optional[int] + access=None, # type: Optional[Union[str, "_models.PublicAccessType"]] + request_id_parameter=None, # type: Optional[str] + container_acl=None, # type: Optional[List["_models.SignedIdentifier"]] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """sets the permissions for the specified container. The permissions indicate whether blobs in a + container may be accessed publicly. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param access: Specifies whether data in the container may be accessed publicly and the level + of access. + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param container_acl: the acls for the container. + :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
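+
+        A minimal usage sketch (editorial, not generated output; assumes ``client``
+        is a generated ``AzureBlobStorage`` instance scoped to this container and
+        that ``models`` is this package's generated models module):
+
+        .. code-block:: python
+
+            import datetime
+
+            # Define a stored access policy granting read/list for one hour.
+            policy = models.AccessPolicy(
+                start=datetime.datetime.utcnow().isoformat(),
+                expiry=(datetime.datetime.utcnow()
+                        + datetime.timedelta(hours=1)).isoformat(),
+                permission="rl")
+            identifier = models.SignedIdentifier(id="read-policy-1",
+                                                 access_policy=policy)
+            client.container.set_access_policy(container_acl=[identifier],
+                                               access="container")
+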
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + restype = "container" + comp = "acl" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_access_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if access is not None: + header_parameters['x-ms-blob-public-access'] = self._serialize.header("access", access, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True, 'itemsName': 'SignedIdentifier'}} + if container_acl is not None: + body_content = self._serialize.body(container_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise 
HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_policy.metadata = {'url': '/{containerName}'} # type: ignore + + def restore( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + deleted_container_name=None, # type: Optional[str] + deleted_container_version=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Restores a previously-deleted container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of + the deleted container to restore. + :type deleted_container_name: str + :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the + version of the deleted container to restore. 
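+
+        A minimal usage sketch (editorial, not generated output; ``client`` is an
+        assumed generated ``AzureBlobStorage`` instance, and the version string is
+        a placeholder for a value obtained from a container listing that includes
+        deleted containers):
+
+        .. code-block:: python
+
+            client.container.restore(
+                deleted_container_name="my-container",
+                deleted_container_version="01D60F8BB59A4652")  # placeholder version
+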
+ :type deleted_container_version: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "undelete" + accept = "application/xml" + + # Construct URL + url = self.restore.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if deleted_container_name is not None: + header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') + if deleted_container_version is not None: + header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + restore.metadata = {'url': '/{containerName}'} # type: ignore + + def rename( + self, + source_container_name, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + source_lease_id=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Renames an existing container. + + :param source_container_name: Required. Specifies the name of the container to rename. + :type source_container_name: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
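+
+        A minimal usage sketch (editorial, not generated output; ``client`` is an
+        assumed generated ``AzureBlobStorage`` instance whose URL path names the
+        destination container, while the source container name is passed here via
+        the x-ms-source-container-name header):
+
+        .. code-block:: python
+
+            client.container.rename(source_container_name="old-container-name")
+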
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param source_lease_id: A lease ID for the source path. If specified, the source path must have + an active lease and the lease ID must match. + :type source_lease_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "rename" + accept = "application/xml" + + # Construct URL + url = self.rename.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-source-container-name'] = self._serialize.header("source_container_name", source_container_name, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {'url': '/{containerName}'} # type: ignore + + def submit_batch( + self, + content_length, # type: int + multipart_content_type, # type: str + body, # type: IO + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) 
-> IO + """The Batch operation allows multiple API calls to be embedded into a single HTTP request. + + :param content_length: The length of the request. + :type content_length: long + :param multipart_content_type: Required. The value of this header must be multipart/mixed with + a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:``. + :type multipart_content_type: str + :param body: Initial data. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "batch" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.submit_batch.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(body, 'IO', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + submit_batch.metadata = {'url': '/{containerName}'} # type: ignore + + def acquire_lease( + self, + timeout=None, # type: Optional[int] + duration=None, # type: Optional[int] + proposed_lease_id=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
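+
+        A minimal usage sketch (editorial, not generated output; ``client`` is an
+        assumed generated ``AzureBlobStorage`` instance scoped to this container):
+
+        .. code-block:: python
+
+            client.container.acquire_lease(
+                duration=15,  # seconds in [15, 60], or -1 for an infinite lease
+                proposed_lease_id="f81d4fae-7dec-11d0-a765-00a0c91e6bf6")
+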
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "acquire" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{containerName}'} # type: ignore + + def release_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "release" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {'url': '/{containerName}'} # type: ignore + + def renew_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
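+
+        A minimal usage sketch (editorial, not generated output; ``client`` is an
+        assumed generated ``AzureBlobStorage`` instance and the lease ID is the
+        one returned when the lease was acquired):
+
+        .. code-block:: python
+
+            client.container.renew_lease(
+                lease_id="f81d4fae-7dec-11d0-a765-00a0c91e6bf6")
+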
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "renew" + accept = "application/xml" + + # Construct URL + url = self.renew_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {'url': '/{containerName}'} # type: ignore + + def break_lease( + self, + timeout=None, # type: Optional[int] + break_period=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease + breaks immediately. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "break" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = 
self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {'url': '/{containerName}'} # type: ignore + + def change_lease( + self, + lease_id, # type: str + proposed_lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + comp = "lease" + restype = "container" + action = "change" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {'url': '/{containerName}'} # type: ignore + + def list_blob_flat_segment( + self, + prefix=None, # type: Optional[str] + marker=None, # type: Optional[str] + maxresults=None, # type: Optional[int] + include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "_models.ListBlobsFlatSegmentResponse" + """[Update] The List Blobs operation returns a list of the blobs under the specified container. + + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. + :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
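+
+        A minimal pagination sketch (editorial, not generated output; ``client``
+        is an assumed generated ``AzureBlobStorage`` instance, and the attribute
+        names follow the generated ``ListBlobsFlatSegmentResponse`` model):
+
+        .. code-block:: python
+
+            marker = None
+            while True:
+                page = client.container.list_blob_flat_segment(
+                    prefix="logs/", maxresults=100, marker=marker)
+                for blob in page.segment.blob_items:
+                    print(blob.name)
+                marker = page.next_marker
+                if not marker:
+                    break
+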
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsFlatSegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsFlatSegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_blob_flat_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListBlobsFlatSegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_blob_flat_segment.metadata = {'url': '/{containerName}'} # type: ignore + + def list_blob_hierarchy_segment( + self, + delimiter, # type: str + prefix=None, # type: Optional[str] + marker=None, # type: Optional[str] + 
maxresults=None, # type: Optional[int] + include=None, # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "_models.ListBlobsHierarchySegmentResponse" + """[Update] The List Blobs operation returns a list of the blobs under the specified container. + + :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix + element in the response body that acts as a placeholder for all blobs whose names begin with + the same substring up to the appearance of the delimiter character. The delimiter may be a + single character or a string. + :type delimiter: str + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. + :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
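+
+        A minimal usage sketch (editorial, not generated output; ``client`` is an
+        assumed generated ``AzureBlobStorage`` instance, and the attribute names
+        follow the generated ``ListBlobsHierarchySegmentResponse`` model):
+
+        .. code-block:: python
+
+            page = client.container.list_blob_hierarchy_segment(
+                delimiter="/", prefix="logs/")
+            for blob_prefix in page.segment.blob_prefixes:  # virtual directories
+                print(blob_prefix.name)
+            for blob in page.segment.blob_items:
+                print(blob.name)
+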
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_blob_hierarchy_segment.metadata = {'url': '/{containerName}'} # type: ignore + + def get_account_info( + self, + 
**kwargs # type: Any + ): + # type: (...) -> None + """Returns the sku name and account kind. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "account" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_account_info.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) + response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {'url': '/{containerName}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_directory_operations.py new file mode 100644 index 0000000..0ebb32d --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_directory_operations.py @@ -0,0 +1,751 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# --------------------------------------------------------------------------
+import datetime
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+
+from .. import models as _models
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
+
+    T = TypeVar('T')
+    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class DirectoryOperations(object):
+    """DirectoryOperations operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~azure.storage.blob.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = _models
+
+    def __init__(self, client, config, serializer, deserializer):
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def create(
+        self,
+        timeout=None,  # type: Optional[int]
+        directory_properties=None,  # type: Optional[str]
+        posix_permissions=None,  # type: Optional[str]
+        posix_umask=None,  # type: Optional[str]
+        request_id_parameter=None,  # type: Optional[str]
+        directory_http_headers=None,  # type: Optional["_models.DirectoryHttpHeaders"]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """Create a directory. By default, the destination is overwritten and if the destination already
+        exists and has a lease the lease is broken. This operation supports conditional HTTP requests.
+        For more information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+        To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`.
+        :type timeout: int
+        :param directory_properties: Optional. User-defined properties to be stored with the file or
+         directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...",
+         where each value is base64 encoded.
+        :type directory_properties: str
+        :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission. The sticky bit is also
+         supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+        :type posix_permissions: str
+        :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask
+         restricts permission settings for the file or directory and is applied only when a default ACL
+         does not exist on the parent directory.
If the umask bit has set, it means that the corresponding + permission will be disabled. Otherwise the corresponding permission will be determined by the + permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified, + a default umask - 0027 will be used. + :type posix_umask: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param directory_http_headers: Parameter group. + :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cache_control = None + _content_type = None + _content_encoding = None + _content_language = None + _content_disposition = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + if directory_http_headers is not None: + _cache_control = directory_http_headers.cache_control + _content_type = directory_http_headers.content_type + _content_encoding = directory_http_headers.content_encoding + _content_language = directory_http_headers.content_language + _content_disposition = directory_http_headers.content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + resource = "directory" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("resource", resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", 
_cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def rename( + self, + rename_source, # type: str + timeout=None, # type: Optional[int] + marker=None, # type: Optional[str] + path_rename_mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] + directory_properties=None, # type: Optional[str] + posix_permissions=None, # type: Optional[str] + posix_umask=None, # type: Optional[str] + source_lease_id=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + directory_http_headers=None, # type: Optional["_models.DirectoryHttpHeaders"] + 
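+        # Illustrative note: directory_http_headers and the *_access_conditions
+        # arguments below are "parameter group" models; the method body unpacks
+        # their attributes into individual request headers (x-ms-cache-control,
+        # x-ms-lease-id, If-Match, ...). A caller might assemble them like this
+        # (hypothetical values):
+        #
+        #     headers = _models.DirectoryHttpHeaders(cache_control="no-cache")
+        #     lease = _models.LeaseAccessConditions(lease_id="<lease-guid>")
+        #     client.directory.rename("/src-fs/src-dir",
+        #                             directory_http_headers=headers,
+        #                             lease_access_conditions=lease)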
        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        source_modified_access_conditions=None,  # type: Optional["_models.SourceModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """Rename a directory. By default, the destination is overwritten and if the destination already
+        exists and has a lease the lease is broken. This operation supports conditional HTTP requests.
+        For more information, see `Specifying Conditional Headers for Blob Service Operations
+        <https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations>`_.
+        To fail if the destination already exists, use a conditional request with If-None-Match: "*".
+
+        :param rename_source: The file or directory to be renamed. The value must have the following
+         format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties will
+         overwrite the existing properties; otherwise, the existing properties will be preserved.
+        :type rename_source: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`.
+        :type timeout: int
+        :param marker: When renaming a directory, the number of paths that are renamed with each
+         invocation is limited. If the number of paths to be renamed exceeds this limit, a continuation
+         token is returned in this response header. When a continuation token is returned in the
+         response, it must be specified in a subsequent invocation of the rename operation to continue
+         renaming the directory.
+        :type marker: str
+        :param path_rename_mode: Determines the behavior of the rename operation.
+        :type path_rename_mode: str or ~azure.storage.blob.models.PathRenameMode
+        :param directory_properties: Optional. User-defined properties to be stored with the file or
+         directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...",
+         where each value is base64 encoded.
+        :type directory_properties: str
+        :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission. The sticky bit is also
+         supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+        :type posix_permissions: str
+        :param posix_umask: Only valid if Hierarchical Namespace is enabled for the account. This umask
+         restricts permission settings for the file or directory and is applied only when a default ACL
+         does not exist on the parent directory. If a umask bit is set, the corresponding permission is
+         disabled; otherwise it is determined by the permissions. A 4-digit octal notation (e.g. 0022)
+         is supported here. If no umask is specified, a default umask of 0027 is used.
+        :type posix_umask: str
+        :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+         an active lease and the lease ID must match.
+        :type source_lease_id: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param directory_http_headers: Parameter group.
+        :type directory_http_headers: ~azure.storage.blob.models.DirectoryHttpHeaders
+        :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cache_control = None + _content_type = None + _content_encoding = None + _content_language = None + _content_disposition = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if directory_http_headers is not None: + _cache_control = directory_http_headers.cache_control + _content_type = directory_http_headers.content_type + _content_encoding = directory_http_headers.content_encoding + _content_language = directory_http_headers.content_language + _content_disposition = directory_http_headers.content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + accept = "application/xml" + + # Construct URL + url = self.rename.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + if path_rename_mode is not None: + query_parameters['mode'] = self._serialize.query("path_rename_mode", path_rename_mode, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if directory_properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("directory_properties", directory_properties, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_umask is not None: + 
header_parameters['x-ms-umask'] = self._serialize.header("posix_umask", posix_umask, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + 
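+        # Illustrative note: when the service cannot complete the rename in a
+        # single call it returns an x-ms-continuation token (deserialized just
+        # above). A caller is expected to loop, passing the token back in as
+        # 'marker' until none is returned -- a hypothetical sketch using the
+        # cls callback to capture the response headers:
+        #
+        #     token, captured = None, {}
+        #     while True:
+        #         client.directory.rename("/src-fs/src-dir", marker=token,
+        #                                 cls=lambda _, __, h: captured.update(h))
+        #         token = captured.get('x-ms-continuation')
+        #         if not token:
+        #             break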
        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    rename.metadata = {'url': '/{filesystem}/{path}'}  # type: ignore
+
+    def delete(
+        self,
+        recursive_directory_delete,  # type: bool
+        timeout=None,  # type: Optional[int]
+        marker=None,  # type: Optional[str]
+        request_id_parameter=None,  # type: Optional[str]
+        lease_access_conditions=None,  # type: Optional["_models.LeaseAccessConditions"]
+        modified_access_conditions=None,  # type: Optional["_models.ModifiedAccessConditions"]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """Deletes the directory.
+
+        :param recursive_directory_delete: If "true", all paths beneath the directory will be deleted.
+         If "false" and the directory is non-empty, an error occurs.
+        :type recursive_directory_delete: bool
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations">Setting
+         Timeouts for Blob Service Operations.</a>`.
+        :type timeout: int
+        :param marker: When deleting a directory, the number of paths that are deleted with each
+         invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation
+         token is returned in this response header. When a continuation token is returned in the
+         response, it must be specified in a subsequent invocation of the delete operation to continue
+         deleting the directory.
+        :type marker: str
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['recursive'] = self._serialize.query("recursive_directory_delete", recursive_directory_delete, 'bool') + if marker is not None: + query_parameters['continuation'] = self._serialize.query("marker", marker, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def set_access_control( + self, + timeout=None, # type: Optional[int] + owner=None, # type: Optional[str] + group=None, # type: Optional[str] + posix_permissions=None, # type: Optional[str] + posix_acl=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Set the owner, group, permissions, or access control list for a directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param posix_permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type posix_permissions: str + :param posix_acl: Sets POSIX access control rights on files and directories. The value is a + comma-separated list of access control entries. Each access control entry (ACE) consists of a + scope, a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type posix_acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "setAccessControl" + accept = "application/xml" + + # Construct URL + url = self.set_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if posix_permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("posix_permissions", posix_permissions, 'str') + if posix_acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("posix_acl", posix_acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def get_access_control( + self, + timeout=None, # type: Optional[int] + upn=None, # type: Optional[bool] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Get the owner, group, permissions, or access control list for a directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If + "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response + headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If + "false", the values will be returned as Azure Active Directory Object IDs. The default value is + false. + :type upn: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "getAccessControl" + accept = "application/xml" + + # Construct URL + url = self.get_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.DataLakeStorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
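+        # Illustrative note: this operation is issued as a HEAD request (see
+        # self._client.head above), so the result travels entirely in response
+        # headers -- x-ms-owner, x-ms-group, x-ms-permissions and x-ms-acl --
+        # with no body. A hypothetical way to capture them via the cls callback:
+        #
+        #     captured = {}
+        #     client.directory.get_access_control(
+        #         upn=True, cls=lambda _, __, h: captured.update(h))
+        #     acl = captured.get('x-ms-acl')  # e.g. "user::rwx,group::r-x,other::---"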
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) + response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) + response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) + response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_page_blob_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_page_blob_operations.py new file mode 100644 index 0000000..c953df2 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_page_blob_operations.py @@ -0,0 +1,1437 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class PageBlobOperations(object): + """PageBlobOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
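+
+    Illustrative note (an assumption about the usual generated-client wiring,
+    not a guarantee): instances are reached through the client's ``page_blob``
+    attribute, e.g. ``client.page_blob.create(content_length=0,
+    blob_content_length=1024)``; ``blob_content_length`` must be 512-byte
+    aligned, as the create docstring below notes.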
+ """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create( + self, + content_length, # type: int + blob_content_length, # type: int + timeout=None, # type: Optional[int] + tier=None, # type: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] + metadata=None, # type: Optional[str] + blob_sequence_number=0, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + blob_tags_string=None, # type: Optional[str] + immutability_policy_expiry=None, # type: Optional[datetime.datetime] + immutability_policy_mode=None, # type: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] + legal_hold=None, # type: Optional[bool] + blob_http_headers=None, # type: Optional["_models.BlobHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Create operation creates a new page blob. + + :param content_length: The length of the request. + :type content_length: long + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. + :type blob_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param tier: Optional. Indicates the tier to be set on the page blob. + :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. + :type metadata: str + :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled + value that you can use to track requests. The value of the sequence number must be between 0 + and 2^63 - 1. + :type blob_sequence_number: long + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. 
+ :type legal_hold: bool + :param blob_http_headers: Parameter group. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_content_type = blob_http_headers.blob_content_type + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + blob_type = "PageBlob" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-blob-type'] = self._serialize.header("blob_type", blob_type, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("tier", tier, 'str') + if _blob_content_type is not None: + 
header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", _blob_content_type, 'str') + if _blob_content_encoding is not None: + header_parameters['x-ms-blob-content-encoding'] = self._serialize.header("blob_content_encoding", _blob_content_encoding, 'str') + if _blob_content_language is not None: + header_parameters['x-ms-blob-content-language'] = self._serialize.header("blob_content_language", _blob_content_language, 'str') + if _blob_content_md5 is not None: + header_parameters['x-ms-blob-content-md5'] = self._serialize.header("blob_content_md5", _blob_content_md5, 'bytearray') + if _blob_cache_control is not None: + header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", _blob_cache_control, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _blob_content_disposition is not None: + header_parameters['x-ms-blob-content-disposition'] = self._serialize.header("blob_content_disposition", _blob_content_disposition, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if immutability_policy_expiry is not None: + header_parameters['x-ms-immutability-policy-until-date'] = self._serialize.header("immutability_policy_expiry", immutability_policy_expiry, 'rfc-1123') + if immutability_policy_mode is not None: + header_parameters['x-ms-immutability-policy-mode'] = 
self._serialize.header("immutability_policy_mode", immutability_policy_mode, 'str') + if legal_hold is not None: + header_parameters['x-ms-legal-hold'] = self._serialize.header("legal_hold", legal_hold, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-version-id']=self._deserialize('str', response.headers.get('x-ms-version-id')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def upload_pages( + self, + content_length, # type: int + body, # type: IO + transactional_content_md5=None, # type: Optional[bytearray] + transactional_content_crc64=None, # type: Optional[bytearray] + timeout=None, # type: Optional[int] + range=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Upload Pages operation writes a range of pages to a page blob. + + :param content_length: The length of the request. + :type content_length: long + :param body: Initial data. + :type body: IO + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
+ :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Parameter group. + :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + comp = "page" + page_write = "update" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.upload_pages.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} 
# type: Dict[str, Any] + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not 
in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def clear_pages( + self, + content_length, # type: int + timeout=None, # type: Optional[int] + range=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Clear Pages operation clears a set of pages from a page blob. + + :param content_length: The length of the request. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Parameter group. + :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. 
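A minimal usage sketch for the upload_pages operation defined above. This is illustrative only, not part of the diff: `client` is assumed to be the generated service client that normally instantiates this operations group for you (the docstrings above note you should not construct it directly), and all values are invented.

import io

# Hypothetical sketch: write one 512-byte page to an existing page blob.
# `client.page_blob` is an assumed attribute exposing this operations class.
page = b"\x00" * 512                 # page ranges must be 512-byte aligned
client.page_blob.upload_pages(
    body=io.BytesIO(page),           # streamed as the request body
    content_length=512,
    range="bytes=0-511",             # inclusive, 512-aligned byte range
)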
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + comp = "page" + page_write = "clear" + accept = "application/xml" + + # Construct URL + url = self.clear_pages.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if 
_encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + clear_pages.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def upload_pages_from_url( + self, + source_url, # type: str + source_range, # type: str + content_length, # type: int + range, # type: str + source_content_md5=None, # 
type: Optional[bytearray] + source_contentcrc64=None, # type: Optional[bytearray] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + copy_source_authorization=None, # type: Optional[str] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + sequence_number_access_conditions=None, # type: Optional["_models.SequenceNumberAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Upload Pages operation writes a range of pages to a page blob where the contents are read + from a URL. + + :param source_url: Specify a URL to the copy source. + :type source_url: str + :param source_range: Bytes of source data in the specified range. The length of this range + should match the ContentLength header and x-ms-range/Range destination range header. + :type source_range: str + :param content_length: The length of the request. + :type content_length: long + :param range: The range of bytes to which the source range would be written. The range should + be 512 aligned and range-end is required. + :type range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. + :type source_content_md5: bytearray + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. + :type source_contentcrc64: bytearray + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. + :type copy_source_authorization: str + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param sequence_number_access_conditions: Parameter group. + :type sequence_number_access_conditions: ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. 
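A companion sketch for the clear_pages operation above (illustrative; same assumed `client` as before). Because the clear variant of Put Page sends no request body, content_length is zero:

# Hypothetical sketch: zero out the first page of an existing page blob.
client.page_blob.clear_pages(
    content_length=0,                # clear sends an empty request body
    range="bytes=0-511",
)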
+ :type source_modified_access_conditions: ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + if sequence_number_access_conditions is not None: + _if_sequence_number_less_than_or_equal_to = sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + if source_modified_access_conditions is not None: + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "page" + page_write = "update" + accept = "application/xml" + + # Construct URL + url = self.upload_pages_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-page-write'] = self._serialize.header("page_write", page_write, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("source_url", source_url, 'str') + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + if source_content_md5 is not None: + header_parameters['x-ms-source-content-md5'] = 
self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if source_contentcrc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_contentcrc64", source_contentcrc64, 'bytearray') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_sequence_number_less_than_or_equal_to is not None: + header_parameters['x-ms-if-sequence-number-le'] = self._serialize.header("if_sequence_number_less_than_or_equal_to", _if_sequence_number_less_than_or_equal_to, 'long') + if _if_sequence_number_less_than is not None: + header_parameters['x-ms-if-sequence-number-lt'] = self._serialize.header("if_sequence_number_less_than", _if_sequence_number_less_than, 'long') + if _if_sequence_number_equal_to is not None: + header_parameters['x-ms-if-sequence-number-eq'] = self._serialize.header("if_sequence_number_equal_to", _if_sequence_number_equal_to, 'long') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + if copy_source_authorization is not None: + 
header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-encryption-key-sha256']=self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')) + response_headers['x-ms-encryption-scope']=self._deserialize('str', response.headers.get('x-ms-encryption-scope')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages_from_url.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def get_page_ranges( + self, + snapshot=None, # type: Optional[str] + timeout=None, # type: Optional[int] + range=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> "_models.PageList" + """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot + of a page blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
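A sketch for upload_pages_from_url as defined above (illustrative; the SAS-protected source URL and the assumed `client` are placeholders). The request itself carries no body, so content_length is zero and the bytes are pulled server-side from source_url:

# Hypothetical sketch: server-side copy of 512 bytes from another blob.
client.page_blob.upload_pages_from_url(
    source_url="https://account.blob.core.windows.net/src/blob?sv=...",  # placeholder SAS URL
    source_range="bytes=0-511",      # must match the destination range length
    content_length=0,                # no request body; data comes from source_url
    range="bytes=0-511",
)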
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "pagelist" + accept = "application/xml" + + # Construct URL + url = self.get_page_ranges.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('PageList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_page_ranges.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def get_page_ranges_diff( + self, + snapshot=None, # type: Optional[str] + timeout=None, # type: Optional[int] + prevsnapshot=None, # type: Optional[str] + prev_snapshot_url=None, # type: Optional[str] + range=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> "_models.PageList" + """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that + were changed between target blob and previous snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a + DateTime value that specifies that the response will contain only pages that were changed + between target blob and previous snapshot. Changed pages include both updated and cleared + pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is + the older of the two. Note that incremental snapshots are currently supported only for blobs + created on or after January 1, 2016. + :type prevsnapshot: str + :param prev_snapshot_url: Optional. This header is only supported in service versions + 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The + response will only contain pages that were changed between the target blob and its previous + snapshot. + :type prev_snapshot_url: str + :param range: Return only the bytes of the blob in the specified range. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. 
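A sketch of consuming the result of get_page_ranges above (illustrative; same assumed `client`). The method deserializes the response into a PageList, whose page_range attribute holds the valid ranges:

# Hypothetical sketch: list the committed ranges of a page blob.
page_list = client.page_blob.get_page_ranges(range="bytes=0-1048575")
for page_range in page_list.page_range or []:
    print(page_range.start, page_range.end)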
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList, or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.PageList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "pagelist" + accept = "application/xml" + + # Construct URL + url = self.get_page_ranges_diff.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if prevsnapshot is not None: + query_parameters['prevsnapshot'] = self._serialize.query("prevsnapshot", prevsnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if prev_snapshot_url is not None: + header_parameters['x-ms-previous-snapshot-url'] = self._serialize.header("prev_snapshot_url", prev_snapshot_url, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-blob-content-length']=self._deserialize('long', response.headers.get('x-ms-blob-content-length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('PageList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_page_ranges_diff.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def resize( + self, + blob_content_length, # type: int + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + cpk_info=None, # type: Optional["_models.CpkInfo"] + cpk_scope_info=None, # type: Optional["_models.CpkScopeInfo"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Resize the Blob. + + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. + :type blob_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if cpk_info is not None: + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + _encryption_algorithm = cpk_info.encryption_algorithm + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.resize.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", _encryption_key, 'str') + if _encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", _encryption_key_sha256, 'str') + if _encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", _encryption_algorithm, 'str') + if _encryption_scope is not None: + header_parameters['x-ms-encryption-scope'] = self._serialize.header("encryption_scope", _encryption_scope, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') 
+ header_parameters['x-ms-blob-content-length'] = self._serialize.header("blob_content_length", blob_content_length, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + resize.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def update_sequence_number( + self, + sequence_number_action, # type: Union[str, "_models.SequenceNumberActionType"] + timeout=None, # type: Optional[int] + blob_sequence_number=0, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Update the sequence number of the blob. + + :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the + request. This property applies to page blobs only. This property indicates how the service + should modify the blob's sequence number. + :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled + value that you can use to track requests. The value of the sequence number must be between 0 + and 2^63 - 1. + :type blob_sequence_number: long + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
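A sketch for the resize operation above (illustrative; same assumed `client`). As the docstring notes, the new length must be 512-byte aligned:

# Hypothetical sketch: grow an existing page blob to 1 MiB.
client.page_blob.resize(blob_content_length=1024 * 1024)  # multiple of 512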
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.update_sequence_number.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-sequence-number-action'] = self._serialize.header("sequence_number_action", sequence_number_action, 'str') + if blob_sequence_number is not None: + header_parameters['x-ms-blob-sequence-number'] = self._serialize.header("blob_sequence_number", blob_sequence_number, 'long') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-blob-sequence-number']=self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + update_sequence_number.metadata = {'url': '/{containerName}/{blob}'} # type: ignore + + def copy_incremental( + self, + copy_source, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Copy Incremental operation copies a snapshot of the source page blob to a destination page + blob. The snapshot is copied such that only the differential changes between the previously + copied snapshot are transferred to the destination. The copied snapshots are complete copies of + the original snapshot and can be read or copied from as usual. This API is supported since REST + version 2016-05-31. + + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. 
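A sketch for update_sequence_number above (illustrative). With the "update" action the new value is taken from blob_sequence_number; an "increment" action would omit it:

# Hypothetical sketch: set an explicit sequence number on a page blob.
client.page_blob.update_sequence_number(
    sequence_number_action="update",
    blob_sequence_number=7,          # must be between 0 and 2^63 - 1
)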
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + comp = "incrementalcopy" + accept = "application/xml" + + # Construct URL + url = self.copy_incremental.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_tags is not None: + header_parameters['x-ms-if-tags'] = self._serialize.header("if_tags", _if_tags, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + copy_incremental.metadata = {'url': '/{containerName}/{blob}'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_service_operations.py new file mode 100644 index 0000000..1292561 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_generated/operations/_service_operations.py @@ -0,0 +1,710 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class ServiceOperations(object): + """ServiceOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.blob.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def set_properties( + self, + storage_service_properties, # type: "_models.StorageServiceProperties" + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Sets properties for a storage account's Blob service endpoint, including properties for Storage + Analytics and CORS (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. + :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "properties" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {'url': '/'} # type: ignore + + def get_properties( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "_models.StorageServiceProperties" + """Gets the properties of a storage account's Blob service, including properties for Storage + Analytics and CORS (Cross-Origin Resource Sharing) rules.
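+
+        A minimal usage sketch (hypothetical names: assumes ``service`` is the
+        ServiceOperations instance hanging off a generated client, e.g.
+        ``client.service``)::
+
+            # Read the current service properties, adjust one setting, write back.
+            props = service.get_properties(timeout=30)
+            props.default_service_version = '2020-10-02'
+            service.set_properties(props, timeout=30)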
+ + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceProperties, or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceProperties + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('StorageServiceProperties', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_properties.metadata = {'url': '/'} # type: ignore + + def get_statistics( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "_models.StorageServiceStats" + """Retrieves statistics related to replication for the Blob service. It is only available on the + secondary location endpoint when read-access geo-redundant replication is enabled for the + storage account. + + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceStats, or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceStats + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "stats" + accept = "application/xml" + + # Construct URL + url = self.get_statistics.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('StorageServiceStats', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_statistics.metadata = {'url': '/'} # type: ignore + + def list_containers_segment( + self, + prefix=None, # type: Optional[str] + marker=None, # type: Optional[str] + maxresults=None, # type: Optional[int] + include=None, # type: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) 
-> "_models.ListContainersSegmentResponse" + """The List Containers Segment operation returns a list of the containers under the specified + account. + + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. + :type maxresults: int + :param include: Include this parameter to specify that the container's metadata be returned as + part of the response body. + :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListContainersSegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_containers_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_containers_segment.metadata = {'url': '/'} # type: ignore + + def get_user_delegation_key( + self, + key_info, # type: "_models.KeyInfo" + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "_models.UserDelegationKey" + """Retrieves a user delegation key for the Blob service. This is only a valid operation when using + bearer token authentication. 
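+
+        A minimal request sketch (assumes ``service`` is this operations class on a
+        client authenticated with a bearer token, and that ``KeyInfo`` is imported
+        from the accompanying generated models; the validity window is illustrative)::
+
+            # Key validity window as ISO-8601 UTC timestamps.
+            key_info = KeyInfo(start='2021-01-01T00:00:00Z', expiry='2021-01-02T00:00:00Z')
+            delegation_key = service.get_user_delegation_key(key_info)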
+ + :param key_info: + :type key_info: ~azure.storage.blob.models.KeyInfo + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UserDelegationKey, or the result of cls(response) + :rtype: ~azure.storage.blob.models.UserDelegationKey + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "userdelegationkey" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.get_user_delegation_key.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('UserDelegationKey', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_user_delegation_key.metadata 
= {'url': '/'} # type: ignore + + def get_account_info( + self, + **kwargs # type: Any + ): + # type: (...) -> None + """Returns the sku name and account kind. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "account" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_account_info.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name')) + response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind')) + response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {'url': '/'} # type: ignore + + def submit_batch( + self, + content_length, # type: int + multipart_content_type, # type: str + body, # type: IO + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> IO + """The Batch operation allows multiple API calls to be embedded into a single HTTP request. + + :param content_length: The length of the request. + :type content_length: long + :param multipart_content_type: Required. The value of this header must be multipart/mixed with + a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:`<GUID>`. + :type multipart_content_type: str + :param body: Initial data. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds.
For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "batch" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.submit_batch.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(body, 'IO', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + submit_batch.metadata = {'url': '/'} # type: ignore + + def filter_blobs( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + where=None, # type: Optional[str] + marker=None, # type: Optional[str] + maxresults=None, # type: 
Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.FilterBlobSegment" + """The Filter Blobs operation enables callers to list blobs across all containers whose tags match + a given search expression. Filter blobs searches across all containers within a storage + account but can be scoped within the expression to a single container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param where: Filters the results to return only blobs whose tags match the + specified expression. + :type where: str + :param marker: A string value that identifies the portion of the list of blobs to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all blobs remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. + :type marker: str + :param maxresults: Specifies the maximum number of blobs to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000.
+ :type maxresults: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FilterBlobSegment, or the result of cls(response) + :rtype: ~azure.storage.blob.models.FilterBlobSegment + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "blobs" + accept = "application/xml" + + # Construct URL + url = self.filter_blobs.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if where is not None: + query_parameters['where'] = self._serialize.query("where", where, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('FilterBlobSegment', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + filter_blobs.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_lease.py b/azure/multiapi/storagev2/blob/v2020_10_02/_lease.py new file mode 100644 index 0000000..d495d6e --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_lease.py @@ -0,0 +1,331 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import uuid + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, TypeVar, TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator import distributed_trace + +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._serialize import get_modify_conditions + +if TYPE_CHECKING: + from datetime import datetime + + BlobClient = TypeVar("BlobClient") + ContainerClient = TypeVar("ContainerClient") + + +class BlobLeaseClient(object): + """Creates a new BlobLeaseClient. + + This client provides lease operations on a BlobClient or ContainerClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the blob or container to lease. + :type client: ~azure.storage.blob.BlobClient or + ~azure.storage.blob.ContainerClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. + """ + def __init__( + self, client, lease_id=None + ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs + # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None + self.id = lease_id or str(uuid.uuid4()) + self.last_modified = None + self.etag = None + if hasattr(client, 'blob_name'): + self._client = client._client.blob # type: ignore # pylint: disable=protected-access + elif hasattr(client, 'container_name'): + self._client = client._client.container # type: ignore # pylint: disable=protected-access + else: + raise TypeError("Lease must use either BlobClient or ContainerClient.") + + def __enter__(self): + return self + + def __exit__(self, *args): + self.release() + + @distributed_trace + def acquire(self, lease_duration=-1, **kwargs): + # type: (int, **Any) -> None + """Requests a new lease. + + If the container does not have an active lease, the Blob service creates a + lease on the container and returns a new lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace + def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the container or blob. Note that + the lease may be renewed even if it has expired as long as the container + or blob has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. 
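+
+        A minimal usage sketch (assumes ``lease`` is a ``BlobLeaseClient`` returned
+        by an earlier ``BlobClient.acquire_lease`` call)::
+
+            lease.renew(timeout=10)
+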
+ :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the container or blob. Releasing the lease allows another client + to immediately acquire the lease for the container or blob as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.release_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.change_lease( + lease_id=self.id, + proposed_lease_id=proposed_lease_id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def break_lease(self, lease_break_period=None, **kwargs): + # type: (Optional[int], Any) -> int + """Break the lease, if the container or blob has an active lease. + + Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. When a lease + is broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the container or blob. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :param int lease_break_period: + This is the proposed duration of seconds that the lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the lease. If longer, the time remaining on the lease is used. + A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. 
``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. + :rtype: int + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.break_lease( + timeout=kwargs.pop('timeout', None), + break_period=lease_break_period, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_10_02/_list_blobs_helper.py new file mode 100644 index 0000000..309d37b --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_list_blobs_helper.py @@ -0,0 +1,236 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from azure.core.paging import PageIterator, ItemPaged +from azure.core.exceptions import HttpResponseError +from ._deserialize import get_blob_properties_from_generated_code, parse_tags +from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix, FilterBlobItem +from ._models import BlobProperties, FilteredBlob +from ._shared.models import DictMixin +from ._shared.response_handlers import return_context_and_deserialized, process_storage_error + + +class BlobPropertiesPaged(PageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
+ """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + delimiter=None, + location_mode=None): + super(BlobPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.delimiter = delimiter + self.current_page = None + self.location_mode = location_mode + + def _get_next_cb(self, continuation_token): + try: + return self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.container = self._response.container_name + self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] + + return self._response.next_marker or None, self.current_page + + def _build_item(self, item): + if isinstance(item, BlobProperties): + return item + if isinstance(item, BlobItemInternal): + blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access + blob.container = self.container + return blob + return item + + +class BlobPrefixPaged(BlobPropertiesPaged): + def __init__(self, *args, **kwargs): + super(BlobPrefixPaged, self).__init__(*args, **kwargs) + self.name = self.prefix + + def _extract_data_cb(self, get_next_return): + continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) + self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items + self.current_page = [self._build_item(item) for item in self.current_page] + self.delimiter = self._response.delimiter + + return continuation_token, self.current_page + + def _build_item(self, item): + item = super(BlobPrefixPaged, self)._build_item(item) + if isinstance(item, GenBlobPrefix): + return BlobPrefix( + self._command, + container=self.container, + prefix=item.name, + results_per_page=self.results_per_page, + location_mode=self.location_mode) + return item + + +class BlobPrefix(ItemPaged, DictMixin): + """An Iterable of Blob properties. + + Returned from walk_blobs when a delimiter is used. + Can be thought of as a virtual blob directory. + + :ivar str name: The prefix, or "directory name" of the blob. + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str next_marker: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. 
+ :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str marker: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. + """ + def __init__(self, *args, **kwargs): + super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) + self.name = kwargs.get('prefix') + self.prefix = kwargs.get('prefix') + self.results_per_page = kwargs.get('results_per_page') + self.container = kwargs.get('container') + self.delimiter = kwargs.get('delimiter') + self.location_mode = kwargs.get('location_mode') + + +class FilteredBlobPaged(PageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.FilteredBlob) + :ivar str container: The container that the blobs are listed from. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
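+
+    A sketch of typical indirect usage via ``BlobServiceClient.find_blobs_by_tags``,
+    which drives this pager (the client variable and tag expression are illustrative)::
+
+        for blob in service_client.find_blobs_by_tags("\"project\"='alpha'"):
+            print(blob.name, blob.container_name, blob.tags)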
+ """ + def __init__( + self, command, + container=None, + results_per_page=None, + continuation_token=None, + location_mode=None): + super(FilteredBlobPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.marker = continuation_token + self.results_per_page = results_per_page + self.container = container + self.current_page = None + self.location_mode = location_mode + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.marker = self._response.next_marker + self.current_page = [self._build_item(item) for item in self._response.blobs] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + if isinstance(item, FilterBlobItem): + tags = parse_tags(item.tags) + blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) + return blob + return item diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_models.py b/azure/multiapi/storagev2/blob/v2020_10_02/_models.py new file mode 100644 index 0000000..68c3b4d --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_models.py @@ -0,0 +1,1209 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines + +from enum import Enum + +from azure.core.paging import PageIterator +from azure.core.exceptions import HttpResponseError +from ._generated.models import ArrowField + +from ._shared import decode_base64_to_bytes +from ._shared.response_handlers import return_context_and_deserialized, process_storage_error +from ._shared.models import DictMixin, get_enum_value +from ._generated.models import Logging as GeneratedLogging +from ._generated.models import Metrics as GeneratedMetrics +from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy +from ._generated.models import StaticWebsite as GeneratedStaticWebsite +from ._generated.models import CorsRule as GeneratedCorsRule +from ._generated.models import AccessPolicy as GenAccessPolicy + + +class BlobType(str, Enum): + + BlockBlob = "BlockBlob" + PageBlob = "PageBlob" + AppendBlob = "AppendBlob" + + +class BlockState(str, Enum): + """Block blob block types.""" + + Committed = 'Committed' #: Committed blocks. + Latest = 'Latest' #: Latest blocks. + Uncommitted = 'Uncommitted' #: Uncommitted blocks. + + +class StandardBlobTier(str, Enum): + """ + Specifies the blob tier to set the blob to. This is only applicable for + block blobs on standard storage accounts. + """ + + Archive = 'Archive' #: Archive + Cool = 'Cool' #: Cool + Hot = 'Hot' #: Hot + + +class PremiumPageBlobTier(str, Enum): + """ + Specifies the page blob tier to set the blob to. 
This is only applicable to page
+    blobs on premium storage accounts. Please take a look at:
+    https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets
+    for detailed information on the corresponding IOPS and throughput per PageBlobTier.
+    """
+
+    P4 = 'P4'  #: P4 Tier
+    P6 = 'P6'  #: P6 Tier
+    P10 = 'P10'  #: P10 Tier
+    P20 = 'P20'  #: P20 Tier
+    P30 = 'P30'  #: P30 Tier
+    P40 = 'P40'  #: P40 Tier
+    P50 = 'P50'  #: P50 Tier
+    P60 = 'P60'  #: P60 Tier
+
+
+class QuickQueryDialect(str, Enum):
+    """Specifies the quick query input/output dialect."""
+
+    DelimitedText = 'DelimitedTextDialect'
+    DelimitedJson = 'DelimitedJsonDialect'
+    Parquet = 'ParquetDialect'
+
+
+class SequenceNumberAction(str, Enum):
+    """Sequence number actions."""
+
+    Increment = 'increment'
+    """
+    Increments the value of the sequence number by 1. If specifying this option,
+    do not include the x-ms-blob-sequence-number header.
+    """
+
+    Max = 'max'
+    """
+    Sets the sequence number to be the higher of the value included with the
+    request and the value currently stored for the blob.
+    """
+
+    Update = 'update'
+    """Sets the sequence number to the value included with the request."""
+
+
+class PublicAccess(str, Enum):
+    """
+    Specifies whether data in the container may be accessed publicly and the level of access.
+    """
+
+    OFF = 'off'
+    """
+    Specifies that there is no public read access for either the container or the blobs within it.
+    Clients cannot enumerate the containers within the storage account, nor the blobs within the container.
+    """
+
+    Blob = 'blob'
+    """
+    Specifies public read access for blobs. Blob data within this container can be read
+    via anonymous request, but container data is not available. Clients cannot enumerate
+    blobs within the container via anonymous request.
+    """
+
+    Container = 'container'
+    """
+    Specifies full public read access for container and blob data. Clients can enumerate
+    blobs within the container via anonymous request, but cannot enumerate containers
+    within the storage account.
+    """
+
+
+class BlobImmutabilityPolicyMode(str, Enum):
+    """
+    Specifies the immutability policy mode to set on the blob.
+    "Mutable" can only be returned by the service; do not set it to "Mutable".
+    """
+
+    Unlocked = "Unlocked"
+    Locked = "Locked"
+    Mutable = "Mutable"
+
+
+class BlobAnalyticsLogging(GeneratedLogging):
+    """Azure Analytics Logging settings.
+
+    :keyword str version:
+        The version of Storage Analytics to configure. The default value is 1.0.
+    :keyword bool delete:
+        Indicates whether all delete requests should be logged. The default value is `False`.
+    :keyword bool read:
+        Indicates whether all read requests should be logged. The default value is `False`.
+    :keyword bool write:
+        Indicates whether all write requests should be logged. The default value is `False`.
+    :keyword ~azure.storage.blob.RetentionPolicy retention_policy:
+        Determines how long the associated data should persist. If not specified the retention
+        policy will be disabled by default.
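+
+    Example (an illustrative sketch, not part of the API surface; assumes an
+    existing ``BlobServiceClient`` named ``service_client``)::
+
+        logging_settings = BlobAnalyticsLogging(
+            read=True, write=True, delete=True,
+            retention_policy=RetentionPolicy(enabled=True, days=5))
+        service_client.set_service_properties(analytics_logging=logging_settings)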
+ """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.delete = kwargs.get('delete', False) + self.read = kwargs.get('read', False) + self.write = kwargs.get('write', False) + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + delete=generated.delete, + read=generated.read, + write=generated.write, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class Metrics(GeneratedMetrics): + """A summary of request statistics grouped by API in hour or minute aggregates + for blobs. + + :keyword str version: + The version of Storage Analytics to configure. The default value is 1.0. + :keyword bool enabled: + Indicates whether metrics are enabled for the Blob service. + The default value is `False`. + :keyword bool include_apis: + Indicates whether metrics should generate summary statistics for called API operations. + :keyword ~azure.storage.blob.RetentionPolicy retention_policy: + Determines how long the associated data should persist. If not specified the retention + policy will be disabled by default. + """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.enabled = kwargs.get('enabled', False) + self.include_apis = kwargs.get('include_apis') + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + enabled=generated.enabled, + include_apis=generated.include_apis, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class RetentionPolicy(GeneratedRetentionPolicy): + """The retention policy which determines how long the associated data should + persist. + + :param bool enabled: + Indicates whether a retention policy is enabled for the storage service. + The default value is False. + :param int days: + Indicates the number of days that metrics or logging or + soft-deleted data should be retained. All data older than this value will + be deleted. If enabled=True, the number of days must be specified. + """ + + def __init__(self, enabled=False, days=None): + super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None) + if self.enabled and (self.days is None): + raise ValueError("If policy is enabled, 'days' must be specified.") + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + days=generated.days, + ) + + +class StaticWebsite(GeneratedStaticWebsite): + """The properties that enable an account to host a static website. + + :keyword bool enabled: + Indicates whether this account is hosting a static website. + The default value is `False`. + :keyword str index_document: + The default name of the index page under each directory. + :keyword str error_document404_path: + The absolute path of the custom 404 page. + :keyword str default_index_document_path: + Absolute path of the default index page. 
+ """ + + def __init__(self, **kwargs): + self.enabled = kwargs.get('enabled', False) + if self.enabled: + self.index_document = kwargs.get('index_document') + self.error_document404_path = kwargs.get('error_document404_path') + self.default_index_document_path = kwargs.get('default_index_document_path') + else: + self.index_document = None + self.error_document404_path = None + self.default_index_document_path = None + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + index_document=generated.index_document, + error_document404_path=generated.error_document404_path, + default_index_document_path=generated.default_index_document_path + ) + + +class CorsRule(GeneratedCorsRule): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. Web browsers implement a + security restriction known as same-origin policy that prevents a web page + from calling APIs in a different domain; CORS provides a secure way to + allow one domain (the origin domain) to call APIs in another domain. + + :param list(str) allowed_origins: + A list of origin domains that will be allowed via CORS, or "*" to allow + all domains. The list of must contain at least one entry. Limited to 64 + origin domains. Each allowed origin can have up to 256 characters. + :param list(str) allowed_methods: + A list of HTTP methods that are allowed to be executed by the origin. + The list of must contain at least one entry. For Azure Storage, + permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. + :keyword list(str) allowed_headers: + Defaults to an empty list. A list of headers allowed to be part of + the cross-origin request. Limited to 64 defined headers and 2 prefixed + headers. Each header can be up to 256 characters. + :keyword list(str) exposed_headers: + Defaults to an empty list. A list of response headers to expose to CORS + clients. Limited to 64 defined headers and two prefixed headers. Each + header can be up to 256 characters. + :keyword int max_age_in_seconds: + The number of seconds that the client/browser should cache a + preflight response. + """ + + def __init__(self, allowed_origins, allowed_methods, **kwargs): + self.allowed_origins = ','.join(allowed_origins) + self.allowed_methods = ','.join(allowed_methods) + self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) + self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) + self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) + + @classmethod + def _from_generated(cls, generated): + return cls( + [generated.allowed_origins], + [generated.allowed_methods], + allowed_headers=[generated.allowed_headers], + exposed_headers=[generated.exposed_headers], + max_age_in_seconds=generated.max_age_in_seconds, + ) + + +class ContainerProperties(DictMixin): + """Blob container's properties class. + + Returned ``ContainerProperties`` instances expose these values through a + dictionary interface, for example: ``container_props["last_modified"]``. + Additionally, the container name is available as ``container_props["name"]``. + + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the container was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar ~azure.storage.blob.LeaseProperties lease: + Stores all the lease information for the container. 
+    :ivar str public_access: Specifies whether data in the container may be accessed
+        publicly and the level of access.
+    :ivar bool has_immutability_policy:
+        Represents whether the container has an immutability policy.
+    :ivar bool has_legal_hold:
+        Represents whether the container has a legal hold.
+    :ivar bool immutable_storage_with_versioning_enabled:
+        Represents whether immutable storage with versioning is enabled on the container.
+
+        .. versionadded:: 12.10.0
+            This was introduced in API version '2020-10-02'.
+
+    :ivar dict metadata: A dict with name-value pairs to associate with the
+        container as metadata.
+    :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope:
+        The default encryption scope configuration for the container.
+    :ivar bool deleted:
+        Whether this container was deleted.
+    :ivar str version:
+        The version of a deleted container.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = None
+        self.last_modified = kwargs.get('Last-Modified')
+        self.etag = kwargs.get('ETag')
+        self.lease = LeaseProperties(**kwargs)
+        self.public_access = kwargs.get('x-ms-blob-public-access')
+        self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy')
+        self.deleted = None
+        self.version = None
+        self.has_legal_hold = kwargs.get('x-ms-has-legal-hold')
+        self.metadata = kwargs.get('metadata')
+        self.encryption_scope = None
+        self.immutable_storage_with_versioning_enabled = kwargs.get('x-ms-immutable-storage-with-versioning-enabled')
+        default_encryption_scope = kwargs.get('x-ms-default-encryption-scope')
+        if default_encryption_scope:
+            self.encryption_scope = ContainerEncryptionScope(
+                default_encryption_scope=default_encryption_scope,
+                prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False)
+            )
+
+    @classmethod
+    def _from_generated(cls, generated):
+        props = cls()
+        props.name = generated.name
+        props.last_modified = generated.properties.last_modified
+        props.etag = generated.properties.etag
+        props.lease = LeaseProperties._from_generated(generated)  # pylint: disable=protected-access
+        props.public_access = generated.properties.public_access
+        props.has_immutability_policy = generated.properties.has_immutability_policy
+        props.immutable_storage_with_versioning_enabled = \
+            generated.properties.is_immutable_storage_with_versioning_enabled
+        props.deleted = generated.deleted
+        props.version = generated.version
+        props.has_legal_hold = generated.properties.has_legal_hold
+        props.metadata = generated.metadata
+        props.encryption_scope = ContainerEncryptionScope._from_generated(generated)  # pylint: disable=protected-access
+        return props
+
+
+class ContainerPropertiesPaged(PageIterator):
+    """An Iterable of Container properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A container name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.blob.ContainerProperties)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only containers whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of container names to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
+        super(ContainerPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [self._build_item(item) for item in self._response.container_items]
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        return ContainerProperties._from_generated(item)  # pylint: disable=protected-access
+
+
+class ImmutabilityPolicy(DictMixin):
+    """Optional parameters for setting the immutability policy of a blob, blob snapshot or blob version.
+
+    .. versionadded:: 12.10.0
+        This was introduced in API version '2020-10-02'.
+
+    :keyword ~datetime.datetime expiry_time:
+        Specifies the date and time when the blob's immutability policy is set to expire.
+    :keyword str or ~azure.storage.blob.BlobImmutabilityPolicyMode policy_mode:
+        Specifies the immutability policy mode to set on the blob.
+        Possible values to set include: "Locked", "Unlocked".
+        "Mutable" can only be returned by the service; do not set it to "Mutable".
+    """
+
+    def __init__(self, **kwargs):
+        self.expiry_time = kwargs.pop('expiry_time', None)
+        self.policy_mode = kwargs.pop('policy_mode', None)
+
+    @classmethod
+    def _from_generated(cls, generated):
+        immutability_policy = cls()
+        immutability_policy.expiry_time = generated.properties.immutability_policy_expires_on
+        immutability_policy.policy_mode = generated.properties.immutability_policy_mode
+        return immutability_policy
+
+
+class BlobProperties(DictMixin):
+    """
+    Blob Properties.
+
+    :ivar str name:
+        The name of the blob.
+    :ivar str container:
+        The container in which the blob resides.
+    :ivar str snapshot:
+        Datetime value that uniquely identifies the blob snapshot.
+    :ivar ~azure.storage.blob.BlobType blob_type:
+        String indicating this blob's type.
+    :ivar dict metadata:
+        Name-value pairs associated with the blob as metadata.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the blob was modified.
+    :ivar str etag:
+        The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar int size:
+        The size of the content returned. If the entire blob was requested,
+        the length of blob in bytes. If a subset of the blob was requested, the
+        length of the returned subset.
+ :ivar str content_range: + Indicates the range of bytes returned in the event that the client + requested a subset of the blob. + :ivar int append_blob_committed_block_count: + (For Append Blobs) Number of committed blocks in the blob. + :ivar bool is_append_blob_sealed: + Indicate if the append blob is sealed or not. + + .. versionadded:: 12.4.0 + + :ivar int page_blob_sequence_number: + (For Page Blobs) Sequence number for page blob used for coordinating + concurrent writes. + :ivar bool server_encrypted: + Set to true if the blob is encrypted on the server. + :ivar ~azure.storage.blob.CopyProperties copy: + Stores all the copy properties for the blob. + :ivar ~azure.storage.blob.ContentSettings content_settings: + Stores all the content settings for the blob. + :ivar ~azure.storage.blob.LeaseProperties lease: + Stores all the lease information for the blob. + :ivar ~azure.storage.blob.StandardBlobTier blob_tier: + Indicates the access tier of the blob. The hot tier is optimized + for storing data that is accessed frequently. The cool storage tier + is optimized for storing data that is infrequently accessed and stored + for at least a month. The archive tier is optimized for storing + data that is rarely accessed and stored for at least six months + with flexible latency requirements. + :ivar str rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :ivar ~datetime.datetime blob_tier_change_time: + Indicates when the access tier was last changed. + :ivar bool blob_tier_inferred: + Indicates whether the access tier was inferred by the service. + If false, it indicates that the tier was set explicitly. + :ivar bool deleted: + Whether this blob was deleted. + :ivar ~datetime.datetime deleted_time: + A datetime object representing the time at which the blob was deleted. + :ivar int remaining_retention_days: + The number of days that the blob will be retained before being permanently deleted by the service. + :ivar ~datetime.datetime creation_time: + Indicates when the blob was created, in UTC. + :ivar str archive_status: + Archive status of blob. + :ivar str encryption_key_sha256: + The SHA-256 hash of the provided encryption key. + :ivar str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + :ivar bool request_server_encrypted: + Whether this blob is encrypted. + :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties: + Only present for blobs that have policy ids and rule ids applied to them. + + .. versionadded:: 12.4.0 + + :ivar str object_replication_destination_policy: + Represents the Object Replication Policy Id that created this blob. + + .. versionadded:: 12.4.0 + + :ivar ~datetime.datetime last_accessed_on: + Indicates when the last Read/Write operation was performed on a Blob. + + .. versionadded:: 12.6.0 + + :ivar int tag_count: + Tags count on this blob. + + .. versionadded:: 12.4.0 + + :ivar dict(str, str) tags: + Key value pair of tags on this blob. + + .. versionadded:: 12.4.0 + :ivar bool has_versions_only: + A true value indicates the root blob is deleted + + .. 
versionadded:: 12.10.0 + + :ivar ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :ivar bool has_legal_hold: + Specified if a legal hold should be set on the blob. + Currently this parameter of upload_blob() API is for BlockBlob only. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + """ + + def __init__(self, **kwargs): + self.name = kwargs.get('name') + self.container = None + self.snapshot = kwargs.get('x-ms-snapshot') + self.version_id = kwargs.get('x-ms-version-id') + self.is_current_version = kwargs.get('x-ms-is-current-version') + self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None + self.metadata = kwargs.get('metadata') + self.encrypted_metadata = kwargs.get('encrypted_metadata') + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.size = kwargs.get('Content-Length') + self.content_range = kwargs.get('Content-Range') + self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') + self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed') + self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') + self.server_encrypted = kwargs.get('x-ms-server-encrypted') + self.copy = CopyProperties(**kwargs) + self.content_settings = ContentSettings(**kwargs) + self.lease = LeaseProperties(**kwargs) + self.blob_tier = kwargs.get('x-ms-access-tier') + self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority') + self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') + self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') + self.deleted = False + self.deleted_time = None + self.remaining_retention_days = None + self.creation_time = kwargs.get('x-ms-creation-time') + self.archive_status = kwargs.get('x-ms-archive-status') + self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256') + self.encryption_scope = kwargs.get('x-ms-encryption-scope') + self.request_server_encrypted = kwargs.get('x-ms-server-encrypted') + self.object_replication_source_properties = kwargs.get('object_replication_source_properties') + self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id') + self.last_accessed_on = kwargs.get('x-ms-last-access-time') + self.tag_count = kwargs.get('x-ms-tag-count') + self.tags = None + self.immutability_policy = ImmutabilityPolicy(expiry_time=kwargs.get('x-ms-immutability-policy-until-date'), + policy_mode=kwargs.get('x-ms-immutability-policy-mode')) + self.has_legal_hold = kwargs.get('x-ms-legal-hold') + self.has_versions_only = None + + +class FilteredBlob(DictMixin): + """Blob info from a Filter Blobs API call. + + :ivar name: Blob name + :type name: str + :ivar container_name: Container name. + :type container_name: str + :ivar tags: Key value pairs of blob tags. + :type tags: Dict[str, str] + """ + def __init__(self, **kwargs): + self.name = kwargs.get('name', None) + self.container_name = kwargs.get('container_name', None) + self.tags = kwargs.get('tags', None) + + +class LeaseProperties(DictMixin): + """Blob Lease Properties. + + :ivar str status: + The lease status of the blob. Possible values: locked|unlocked + :ivar str state: + Lease state of the blob. 
Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a blob is leased, specifies whether the lease is of infinite or fixed duration. + """ + + def __init__(self, **kwargs): + self.status = get_enum_value(kwargs.get('x-ms-lease-status')) + self.state = get_enum_value(kwargs.get('x-ms-lease-state')) + self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) + + @classmethod + def _from_generated(cls, generated): + lease = cls() + lease.status = get_enum_value(generated.properties.lease_status) + lease.state = get_enum_value(generated.properties.lease_state) + lease.duration = get_enum_value(generated.properties.lease_duration) + return lease + + +class ContentSettings(DictMixin): + """The content settings of a blob. + + :param str content_type: + The content type specified for the blob. If no content type was + specified, the default content type is application/octet-stream. + :param str content_encoding: + If the content_encoding has previously been set + for the blob, that value is stored. + :param str content_language: + If the content_language has previously been set + for the blob, that value is stored. + :param str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the blob, that value is stored. + :param str cache_control: + If the cache_control has previously been set for + the blob, that value is stored. + :param bytearray content_md5: + If the content_md5 has been set for the blob, this response + header is stored so that the client can check for message content + integrity. + """ + + def __init__( + self, content_type=None, content_encoding=None, + content_language=None, content_disposition=None, + cache_control=None, content_md5=None, **kwargs): + + self.content_type = content_type or kwargs.get('Content-Type') + self.content_encoding = content_encoding or kwargs.get('Content-Encoding') + self.content_language = content_language or kwargs.get('Content-Language') + self.content_md5 = content_md5 or kwargs.get('Content-MD5') + self.content_disposition = content_disposition or kwargs.get('Content-Disposition') + self.cache_control = cache_control or kwargs.get('Cache-Control') + + @classmethod + def _from_generated(cls, generated): + settings = cls() + settings.content_type = generated.properties.content_type or None + settings.content_encoding = generated.properties.content_encoding or None + settings.content_language = generated.properties.content_language or None + settings.content_md5 = generated.properties.content_md5 or None + settings.content_disposition = generated.properties.content_disposition or None + settings.cache_control = generated.properties.cache_control or None + return settings + + +class CopyProperties(DictMixin): + """Blob Copy Properties. + + These properties will be `None` if this blob has never been the destination + in a Copy Blob operation, or if this blob has been modified after a concluded + Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List. + + :ivar str id: + String identifier for the last attempted Copy Blob operation where this blob + was the destination blob. + :ivar str source: + URL up to 2 KB in length that specifies the source blob used in the last attempted + Copy Blob operation where this blob was the destination blob. 
+    :ivar str status:
+        State of the copy operation identified by Copy ID, with these values:
+            success:
+                Copy completed successfully.
+            pending:
+                Copy is in progress. Check copy_status_description if intermittent,
+                non-fatal errors impede copy progress but don't cause failure.
+            aborted:
+                Copy was ended by Abort Copy Blob.
+            failed:
+                Copy failed. See copy_status_description for failure details.
+    :ivar str progress:
+        Contains the number of bytes copied and the total bytes in the source in the last
+        attempted Copy Blob operation where this blob was the destination blob. Can show
+        between 0 and Content-Length bytes copied.
+    :ivar ~datetime.datetime completion_time:
+        Conclusion time of the last attempted Copy Blob operation where this blob was the
+        destination blob. This value can specify the time of a completed, aborted, or
+        failed copy attempt.
+    :ivar str status_description:
+        Only appears when x-ms-copy-status is failed or pending. Describes the cause of a fatal
+        or non-fatal copy operation failure.
+    :ivar bool incremental_copy:
+        Copies the snapshot of the source page blob to a destination page blob.
+        The snapshot is copied such that only the differential changes since the
+        previously copied snapshot are transferred to the destination.
+    :ivar ~datetime.datetime destination_snapshot:
+        Included if the blob is an incremental copy blob or incremental copy snapshot,
+        if x-ms-copy-status is success. Snapshot time of the last successful
+        incremental copy snapshot for this blob.
+    """
+
+    def __init__(self, **kwargs):
+        self.id = kwargs.get('x-ms-copy-id')
+        self.source = kwargs.get('x-ms-copy-source')
+        self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
+        self.progress = kwargs.get('x-ms-copy-progress')
+        self.completion_time = kwargs.get('x-ms-copy-completion-time')
+        self.status_description = kwargs.get('x-ms-copy-status-description')
+        self.incremental_copy = kwargs.get('x-ms-incremental-copy')
+        self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
+
+    @classmethod
+    def _from_generated(cls, generated):
+        copy = cls()
+        copy.id = generated.properties.copy_id or None
+        copy.status = get_enum_value(generated.properties.copy_status) or None
+        copy.source = generated.properties.copy_source or None
+        copy.progress = generated.properties.copy_progress or None
+        copy.completion_time = generated.properties.copy_completion_time or None
+        copy.status_description = generated.properties.copy_status_description or None
+        copy.incremental_copy = generated.properties.incremental_copy or None
+        copy.destination_snapshot = generated.properties.destination_snapshot or None
+        return copy
+
+
+class BlobBlock(DictMixin):
+    """BlockBlob Block class.
+
+    :param str block_id:
+        Block id.
+    :param str state:
+        Block state. Possible values: committed|uncommitted
+    :ivar int size:
+        Block size in bytes.
+    """
+
+    def __init__(self, block_id, state=BlockState.Latest):
+        self.id = block_id
+        self.state = state
+        self.size = None
+
+    @classmethod
+    def _from_generated(cls, generated):
+        try:
+            decoded_bytes = decode_base64_to_bytes(generated.name)
+            block_id = decoded_bytes.decode('utf-8')
+        # Works around a bug: when large blocks are uploaded through upload_blob, the block id is
+        # not base64 encoded, even though the service expects base64-encoded block ids. If the
+        # returned block id cannot be decoded as UTF-8 text, it was not base64 encoded when the
+        # block was staged, so the returned block id is used directly.
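+        # Illustration with hypothetical values: an id staged as base64('block-001')
+        # decodes back to the text 'block-001' and is returned in decoded form, while
+        # an id whose decoded bytes are not valid UTF-8 falls through to the except
+        # branch below and is used verbatim.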
+ except UnicodeDecodeError: + block_id = generated.name + block = cls(block_id) + block.size = generated.size + return block + + +class PageRange(DictMixin): + """Page Range for page blob. + + :param int start: + Start of page range in bytes. + :param int end: + End of page range in bytes. + """ + + def __init__(self, start=None, end=None): + self.start = start + self.end = end + + +class AccessPolicy(GenAccessPolicy): + """Access Policy class used by the set and get access policy methods in each service. + + A stored access policy can specify the start time, expiry time, and + permissions for the Shared Access Signatures with which it's associated. + Depending on how you want to control access to your resource, you can + specify all of these parameters within the stored access policy, and omit + them from the URL for the Shared Access Signature. Doing so permits you to + modify the associated signature's behavior at any time, as well as to revoke + it. Or you can specify one or more of the access policy parameters within + the stored access policy, and the others on the URL. Finally, you can + specify all of the parameters on the URL. In this case, you can use the + stored access policy to revoke the signature, but not to modify its behavior. + + Together the Shared Access Signature and the stored access policy must + include all fields required to authenticate the signature. If any required + fields are missing, the request will fail. Likewise, if a field is specified + both in the Shared Access Signature URL and in the stored access policy, the + request will fail with status code 400 (Bad Request). + + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.ContainerSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + """ + def __init__(self, permission=None, expiry=None, start=None): + self.start = start + self.expiry = expiry + self.permission = permission + + +class ContainerSasPermissions(object): + """ContainerSasPermissions class to be used with the + :func:`~azure.storage.blob.generate_container_sas` function and + for the AccessPolicies used with + :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. + + :param bool read: + Read the content, properties, metadata or block list of any blob in the + container. Use any blob in the container as the source of a copy operation. + :param bool write: + For any blob in the container, create or write content, properties, + metadata, or block list. 
Snapshot or lease the blob. Resize the blob + (page blob only). Use the blob as the destination of a copy operation + within the same account. Note: You cannot grant permissions to read or + write container properties or metadata, nor to lease a container, with + a container SAS. Use an account SAS instead. + :param bool delete: + Delete any blob in the container. Note: You cannot grant permissions to + delete a container with a container SAS. Use an account SAS instead. + :param bool delete_previous_version: + Delete the previous blob version for the versioning enabled storage account. + :param bool list: + List blobs in the container. + :param bool tag: + Set or get tags on the blobs in the container. + :keyword bool set_immutability_policy: + To enable operations related to set/delete immutability policy. + To get immutability policy, you just need read permission. + """ + def __init__(self, read=False, write=False, delete=False, + list=False, delete_previous_version=False, tag=False, **kwargs): # pylint: disable=redefined-builtin + self.read = read + self.write = write + self.delete = delete + self.list = list + self.delete_previous_version = delete_previous_version + self.tag = tag + self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('l' if self.list else '') + + ('t' if self.tag else '') + + ('i' if self.set_immutability_policy else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create a ContainerSasPermissions from a string. + + To specify read, write, delete, or list permissions you need only to + include the first letter of the word in the string. E.g. For read and + write permissions, you would provide a string "rw". + + :param str permission: The string which dictates the read, write, delete, + and list permissions. + :return: A ContainerSasPermissions object + :rtype: ~azure.storage.blob.ContainerSasPermissions + """ + p_read = 'r' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_list = 'l' in permission + p_delete_previous_version = 'x' in permission + p_tag = 't' in permission + p_set_immutability_policy = 'i' in permission + parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list, + delete_previous_version=p_delete_previous_version, tag=p_tag, + set_immutability_policy=p_set_immutability_policy) + + return parsed + + +class BlobSasPermissions(object): + """BlobSasPermissions class to be used with the + :func:`~azure.storage.blob.generate_blob_sas` function. + + :param bool read: + Read the content, properties, metadata and block list. Use the blob as + the source of a copy operation. + :param bool add: + Add a block to an append blob. + :param bool create: + Write a new blob, snapshot a blob, or copy a blob to a new blob. + :param bool write: + Create or write content, properties, metadata, or block list. Snapshot + or lease the blob. Resize the blob (page blob only). Use the blob as the + destination of a copy operation within the same account. + :param bool delete: + Delete the blob. + :param bool delete_previous_version: + Delete the previous blob version for the versioning enabled storage account. + :param bool tag: + Set or get tags on the blob. + :keyword bool set_immutability_policy: + To enable operations related to set/delete immutability policy. 
+ To get immutability policy, you just need read permission. + """ + def __init__(self, read=False, add=False, create=False, write=False, + delete=False, delete_previous_version=False, tag=True, **kwargs): + self.read = read + self.add = add + self.create = create + self.write = write + self.delete = delete + self.delete_previous_version = delete_previous_version + self.tag = tag + self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) + self._str = (('r' if self.read else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('t' if self.tag else '') + + ('i' if self.set_immutability_policy else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create a BlobSasPermissions from a string. + + To specify read, add, create, write, or delete permissions you need only to + include the first letter of the word in the string. E.g. For read and + write permissions, you would provide a string "rw". + + :param str permission: The string which dictates the read, add, create, + write, or delete permissions. + :return: A BlobSasPermissions object + :rtype: ~azure.storage.blob.BlobSasPermissions + """ + p_read = 'r' in permission + p_add = 'a' in permission + p_create = 'c' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_delete_previous_version = 'x' in permission + p_tag = 't' in permission + p_set_immutability_policy = 'i' in permission + + parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete, + delete_previous_version=p_delete_previous_version, tag=p_tag, + set_immutability_policy=p_set_immutability_policy) + + return parsed + + +class CustomerProvidedEncryptionKey(object): + """ + All data in Azure Storage is encrypted at-rest using an account-level encryption key. + In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents + and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service. + + When you use a customer-provided key, Azure Storage does not manage or persist your key. + When writing data to a blob, the provided key is used to encrypt your data before writing it to disk. + A SHA-256 hash of the encryption key is written alongside the blob contents, + and is used to verify that all subsequent operations against the blob use the same encryption key. + This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob. + When reading a blob, the provided key is used to decrypt your data after reading it from disk. + In both cases, the provided encryption key is securely discarded + as soon as the encryption or decryption process completes. + + :param str key_value: + Base64-encoded AES-256 encryption key value. + :param str key_hash: + Base64-encoded SHA256 of the encryption key. + :ivar str algorithm: + Specifies the algorithm to use when encrypting data using the given key. Must be AES256. + """ + def __init__(self, key_value, key_hash): + self.key_value = key_value + self.key_hash = key_hash + self.algorithm = 'AES256' + + +class ContainerEncryptionScope(object): + """The default encryption scope configuration for a container. + + This scope is used implicitly for all future writes within the container, + but can be overridden per blob operation. + + .. 
versionadded:: 12.2.0 + + :param str default_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + :param bool prevent_encryption_scope_override: + If true, prevents any request from specifying a different encryption scope than the scope + set on the container. Default value is false. + """ + + def __init__(self, default_encryption_scope, **kwargs): + self.default_encryption_scope = default_encryption_scope + self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False) + + @classmethod + def _from_generated(cls, generated): + if generated.properties.default_encryption_scope: + scope = cls( + generated.properties.default_encryption_scope, + prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False + ) + return scope + return None + + +class DelimitedJsonDialect(DictMixin): + """Defines the input or output JSON serialization for a blob data query. + + :keyword str delimiter: The line separator character, default value is '\n' + """ + + def __init__(self, **kwargs): + self.delimiter = kwargs.pop('delimiter', '\n') + + +class DelimitedTextDialect(DictMixin): + """Defines the input or output delimited (CSV) serialization for a blob query request. + + :keyword str delimiter: + Column separator, defaults to ','. + :keyword str quotechar: + Field quote, defaults to '"'. + :keyword str lineterminator: + Record separator, defaults to '\n'. + :keyword str escapechar: + Escape char, defaults to empty. + :keyword bool has_header: + Whether the blob data includes headers in the first line. The default value is False, meaning that the + data will be returned inclusive of the first line. If set to True, the data will be returned exclusive + of the first line. + """ + def __init__(self, **kwargs): + self.delimiter = kwargs.pop('delimiter', ',') + self.quotechar = kwargs.pop('quotechar', '"') + self.lineterminator = kwargs.pop('lineterminator', '\n') + self.escapechar = kwargs.pop('escapechar', "") + self.has_header = kwargs.pop('has_header', False) + + +class ArrowDialect(ArrowField): + """field of an arrow schema. + + All required parameters must be populated in order to send to Azure. + + :param ~azure.storage.blob.ArrowType type: Arrow field type. + :keyword str name: The name of the field. + :keyword int precision: The precision of the field. + :keyword int scale: The scale of the field. + """ + def __init__(self, type, **kwargs): # pylint: disable=redefined-builtin + super(ArrowDialect, self).__init__(type=type, **kwargs) + + +class ArrowType(str, Enum): + + INT64 = "int64" + BOOL = "bool" + TIMESTAMP_MS = "timestamp[ms]" + STRING = "string" + DOUBLE = "double" + DECIMAL = 'decimal' + + +class ObjectReplicationPolicy(DictMixin): + """Policy id and rule ids applied to a blob. + + :ivar str policy_id: + Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair. + :ivar list(~azure.storage.blob.ObjectReplicationRule) rules: + Within each policy there may be multiple replication rules. + e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3 + """ + + def __init__(self, **kwargs): + self.policy_id = kwargs.pop('policy_id', None) + self.rules = kwargs.pop('rules', None) + + +class ObjectReplicationRule(DictMixin): + """Policy id and rule ids applied to a blob. + + :ivar str rule_id: + Rule id. + :ivar str status: + The status of the rule. 
It could be "Complete" or "Failed" + """ + + def __init__(self, **kwargs): + self.rule_id = kwargs.pop('rule_id', None) + self.status = kwargs.pop('status', None) + + +class BlobQueryError(object): + """The error happened during quick query operation. + + :ivar str error: + The name of the error. + :ivar bool is_fatal: + If true, this error prevents further query processing. More result data may be returned, + but there is no guarantee that all of the original data will be processed. + If false, this error does not prevent further query processing. + :ivar str description: + A description of the error. + :ivar int position: + The blob offset at which the error occurred. + """ + def __init__(self, error=None, is_fatal=False, description=None, position=None): + self.error = error + self.is_fatal = is_fatal + self.description = description + self.position = position diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_quick_query_helper.py b/azure/multiapi/storagev2/blob/v2020_10_02/_quick_query_helper.py new file mode 100644 index 0000000..3164337 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_quick_query_helper.py @@ -0,0 +1,195 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from io import BytesIO +from typing import Union, Iterable, IO # pylint: disable=unused-import + +from ._shared.avro.datafile import DataFileReader +from ._shared.avro.avro_io import DatumReader + + +class BlobQueryReader(object): # pylint: disable=too-many-instance-attributes + """A streaming object to read query results. + + :ivar str name: + The name of the blob being quered. + :ivar str container: + The name of the container where the blob is. + :ivar dict response_headers: + The response_headers of the quick query request. + :ivar bytes record_delimiter: + The delimiter used to separate lines, or records with the data. The `records` + method will return these lines via a generator. 
+ """ + + def __init__( + self, + name=None, + container=None, + errors=None, + record_delimiter='\n', + encoding=None, + headers=None, + response=None, + error_cls=None, + ): + self.name = name + self.container = container + self.response_headers = headers + self.record_delimiter = record_delimiter + self._size = 0 + self._bytes_processed = 0 + self._errors = errors + self._encoding = encoding + self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader()) + self._first_result = self._process_record(next(self._parsed_results)) + self._error_cls = error_cls + + def __len__(self): + return self._size + + def _process_record(self, result): + self._size = result.get('totalBytes', self._size) + self._bytes_processed = result.get('bytesScanned', self._bytes_processed) + if 'data' in result: + return result.get('data') + if 'fatal' in result: + error = self._error_cls( + error=result['name'], + is_fatal=result['fatal'], + description=result['description'], + position=result['position'] + ) + if self._errors: + self._errors(error) + return None + + def _iter_stream(self): + if self._first_result is not None: + yield self._first_result + for next_result in self._parsed_results: + processed_result = self._process_record(next_result) + if processed_result is not None: + yield processed_result + + def readall(self): + # type: () -> Union[bytes, str] + """Return all query results. + + This operation is blocking until all data is downloaded. + If encoding has been configured - this will be used to decode individual + records are they are received. + + :rtype: Union[bytes, str] + """ + stream = BytesIO() + self.readinto(stream) + data = stream.getvalue() + if self._encoding: + return data.decode(self._encoding) + return data + + def readinto(self, stream): + # type: (IO) -> None + """Download the query result to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. + :returns: None + """ + for record in self._iter_stream(): + stream.write(record) + + def records(self): + # type: () -> Iterable[Union[bytes, str]] + """Returns a record generator for the query result. + + Records will be returned line by line. + If encoding has been configured - this will be used to decode individual + records are they are received. + + :rtype: Iterable[Union[bytes, str]] + """ + delimiter = self.record_delimiter.encode('utf-8') + for record_chunk in self._iter_stream(): + for record in record_chunk.split(delimiter): + if self._encoding: + yield record.decode(self._encoding) + else: + yield record + + +class QuickQueryStreamer(object): + """ + File-like streaming iterator. + """ + + def __init__(self, generator): + self.generator = generator + self.iterator = iter(generator) + self._buf = b"" + self._point = 0 + self._download_offset = 0 + self._buf_start = 0 + self.file_length = None + + def __len__(self): + return self.file_length + + def __iter__(self): + return self.iterator + + @staticmethod + def seekable(): + return True + + def __next__(self): + next_part = next(self.iterator) + self._download_offset += len(next_part) + return next_part + + next = __next__ # Python 2 compatibility. + + def tell(self): + return self._point + + def seek(self, offset, whence=0): + if whence == 0: + self._point = offset + elif whence == 1: + self._point += offset + else: + raise ValueError("whence must be 0, or 1") + if self._point < 0: + self._point = 0 # XXX is this right? 
+ + def read(self, size): + try: + # keep reading from the generator until the buffer of this stream has enough data to read + while self._point + size > self._download_offset: + self._buf += self.__next__() + except StopIteration: + self.file_length = self._download_offset + + start_point = self._point + + # EOF + self._point = min(self._point + size, self._download_offset) + + relative_start = start_point - self._buf_start + if relative_start < 0: + raise ValueError("Buffer has dumped too much data") + relative_end = relative_start + size + data = self._buf[relative_start: relative_end] + + # dump the extra data in buffer + # buffer start--------------------16bytes----current read position + dumped_size = max(relative_end - 16 - relative_start, 0) + self._buf_start += dumped_size + self._buf = self._buf[dumped_size:] + + return data diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_serialize.py b/azure/multiapi/storagev2/blob/v2020_10_02/_serialize.py new file mode 100644 index 0000000..d44c5ad --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_serialize.py @@ -0,0 +1,205 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use +try: + from urllib.parse import quote +except ImportError: + from urllib2 import quote # type: ignore + +from azure.core import MatchConditions + +from ._models import ( + ContainerEncryptionScope, + DelimitedJsonDialect) +from ._generated.models import ( + ModifiedAccessConditions, + SourceModifiedAccessConditions, + CpkScopeInfo, + ContainerCpkScopeInfo, + QueryFormat, + QuerySerialization, + DelimitedTextConfiguration, + JsonTextConfiguration, + ArrowConfiguration, + QueryFormatType, + BlobTag, + BlobTags, LeaseAccessConditions +) + + +_SUPPORTED_API_VERSIONS = [ + '2019-02-02', + '2019-07-07', + '2019-10-10', + '2019-12-12', + '2020-02-10', + '2020-04-08', + '2020-06-12', + '2020-08-04', + '2020-10-02' +] + + +def _get_match_headers(kwargs, match_param, etag_param): + # type: (Dict[str, Any], str, str) -> Tuple(Dict[str, Any], Optional[str], Optional[str]) + if_match = None + if_none_match = None + match_condition = kwargs.pop(match_param, None) + if match_condition == MatchConditions.IfNotModified: + if_match = kwargs.pop(etag_param, None) + if not if_match: + raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) + elif match_condition == MatchConditions.IfPresent: + if_match = '*' + elif match_condition == MatchConditions.IfModified: + if_none_match = kwargs.pop(etag_param, None) + if not if_none_match: + raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param)) + elif match_condition == MatchConditions.IfMissing: + if_none_match = '*' + elif match_condition is None: + if kwargs.get(etag_param): + raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param)) + else: + raise TypeError("Invalid match condition: {}".format(match_condition)) + return if_match, if_none_match + + +def get_access_conditions(lease): + # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] + try: + lease_id = lease.id # type: ignore + except AttributeError: + lease_id = lease # type: ignore + return LeaseAccessConditions(lease_id=lease_id) if lease_id else None + + +def 
get_modify_conditions(kwargs): + # type: (Dict[str, Any]) -> ModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') + return ModifiedAccessConditions( + if_modified_since=kwargs.pop('if_modified_since', None), + if_unmodified_since=kwargs.pop('if_unmodified_since', None), + if_match=if_match or kwargs.pop('if_match', None), + if_none_match=if_none_match or kwargs.pop('if_none_match', None), + if_tags=kwargs.pop('if_tags_match_condition', None) + ) + + +def get_source_conditions(kwargs): + # type: (Dict[str, Any]) -> SourceModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') + return SourceModifiedAccessConditions( + source_if_modified_since=kwargs.pop('source_if_modified_since', None), + source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), + source_if_match=if_match or kwargs.pop('source_if_match', None), + source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None), + source_if_tags=kwargs.pop('source_if_tags_match_condition', None) + ) + + +def get_cpk_scope_info(kwargs): + # type: (Dict[str, Any]) -> CpkScopeInfo + if 'encryption_scope' in kwargs: + return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope')) + return None + + +def get_container_cpk_scope_info(kwargs): + # type: (Dict[str, Any]) -> ContainerCpkScopeInfo + encryption_scope = kwargs.pop('container_encryption_scope', None) + if encryption_scope: + if isinstance(encryption_scope, ContainerEncryptionScope): + return ContainerCpkScopeInfo( + default_encryption_scope=encryption_scope.default_encryption_scope, + prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override + ) + if isinstance(encryption_scope, dict): + return ContainerCpkScopeInfo( + default_encryption_scope=encryption_scope['default_encryption_scope'], + prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override') + ) + raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.") + return None + + +def get_api_version(kwargs): + # type: (Dict[str, Any], str) -> str + api_version = kwargs.get('api_version', None) + if api_version and api_version not in _SUPPORTED_API_VERSIONS: + versions = '\n'.join(_SUPPORTED_API_VERSIONS) + raise ValueError("Unsupported API version '{}'. 
Please select from:\n{}".format(api_version, versions)) + return api_version or _SUPPORTED_API_VERSIONS[-1] + + +def serialize_blob_tags_header(tags=None): + # type: (Optional[Dict[str, str]]) -> str + if tags is None: + return None + + components = list() + if tags: + for key, value in tags.items(): + components.append(quote(key, safe='.-')) + components.append('=') + components.append(quote(value, safe='.-')) + components.append('&') + + if components: + del components[-1] + + return ''.join(components) + + +def serialize_blob_tags(tags=None): + # type: (Optional[Dict[str, str]]) -> Union[BlobTags, None] + tag_list = list() + if tags: + tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()] + return BlobTags(blob_tag_set=tag_list) + + +def serialize_query_format(formater): + if formater == "ParquetDialect": + qq_format = QueryFormat( + type=QueryFormatType.PARQUET, + parquet_text_configuration=' ' + ) + elif isinstance(formater, DelimitedJsonDialect): + serialization_settings = JsonTextConfiguration( + record_separator=formater.delimiter + ) + qq_format = QueryFormat( + type=QueryFormatType.json, + json_text_configuration=serialization_settings) + elif hasattr(formater, 'quotechar'): # This supports a csv.Dialect as well + try: + headers = formater.has_header + except AttributeError: + headers = False + serialization_settings = DelimitedTextConfiguration( + column_separator=formater.delimiter, + field_quote=formater.quotechar, + record_separator=formater.lineterminator, + escape_char=formater.escapechar, + headers_present=headers + ) + qq_format = QueryFormat( + type=QueryFormatType.delimited, + delimited_text_configuration=serialization_settings + ) + elif isinstance(formater, list): + serialization_settings = ArrowConfiguration( + schema=formater + ) + qq_format = QueryFormat( + type=QueryFormatType.arrow, + arrow_configuration=serialization_settings) + elif not formater: + return None + else: + raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect or ParquetDialect.") + return QuerySerialization(format=qq_format) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/__init__.py new file mode 100644 index 0000000..160f882 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/__init__.py @@ -0,0 +1,56 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import base64 +import hashlib +import hmac + +try: + from urllib.parse import quote, unquote +except ImportError: + from urllib2 import quote, unquote # type: ignore + +import six + + +def url_quote(url): + return quote(url) + + +def url_unquote(url): + return unquote(url) + + +def encode_base64(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def decode_base64_to_bytes(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + return base64.b64decode(data) + + +def decode_base64_to_text(data): + decoded_bytes = decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + +def sign_string(key, string_to_sign, key_is_base64=True): + if key_is_base64: + key = decode_base64_to_bytes(key) + else: + if isinstance(key, six.text_type): + key = key.encode('utf-8') + if isinstance(string_to_sign, six.text_type): + string_to_sign = string_to_sign.encode('utf-8') + signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) + digest = signed_hmac_sha256.digest() + encoded_digest = encode_base64(digest) + return encoded_digest diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/authentication.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/authentication.py new file mode 100644 index 0000000..d04c1e4 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/authentication.py @@ -0,0 +1,142 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +import sys + +try: + from urllib.parse import urlparse, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import unquote # type: ignore + +try: + from yarl import URL +except ImportError: + pass + +try: + from azure.core.pipeline.transport import AioHttpTransport +except ImportError: + AioHttpTransport = None + +from azure.core.exceptions import ClientAuthenticationError +from azure.core.pipeline.policies import SansIOHTTPPolicy + +from . import sign_string + + +logger = logging.getLogger(__name__) + + + +# wraps a given exception with the desired exception type +def _wrap_exception(ex, desired_type): + msg = "" + if ex.args: + msg = ex.args[0] + if sys.version_info >= (3,): + # Automatic chaining in Python 3 means we keep the trace + return desired_type(msg) + # There isn't a good solution in 2 for keeping the stack trace + # in general, or that will not result in an error in 3 + # However, we can keep the previous error type and message + # TODO: In the future we will log the trace + return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) + + +class AzureSigningError(ClientAuthenticationError): + """ + Represents a fatal error when attempting to sign a request. + In general, the cause of this exception is user error. For example, the given account key is not valid. + Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
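+
+    (For example, an account key that is not valid base64 fails inside
+    sign_string while the request is being signed and is re-raised as this
+    error.)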
+ """ + + +# pylint: disable=no-self-use +class SharedKeyCredentialPolicy(SansIOHTTPPolicy): + + def __init__(self, account_name, account_key): + self.account_name = account_name + self.account_key = account_key + super(SharedKeyCredentialPolicy, self).__init__() + + @staticmethod + def _get_headers(request, headers_to_sign): + headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) + if 'content-length' in headers and headers['content-length'] == '0': + del headers['content-length'] + return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' + + @staticmethod + def _get_verb(request): + return request.http_request.method + '\n' + + def _get_canonicalized_resource(self, request): + uri_path = urlparse(request.http_request.url).path + try: + if isinstance(request.context.transport, AioHttpTransport) or \ + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ + isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), + AioHttpTransport): + uri_path = URL(uri_path) + return '/' + self.account_name + str(uri_path) + except TypeError: + pass + return '/' + self.account_name + uri_path + + @staticmethod + def _get_canonicalized_headers(request): + string_to_sign = '' + x_ms_headers = [] + for name, value in request.http_request.headers.items(): + if name.startswith('x-ms-'): + x_ms_headers.append((name.lower(), value)) + x_ms_headers.sort() + for name, value in x_ms_headers: + if value is not None: + string_to_sign += ''.join([name, ':', value, '\n']) + return string_to_sign + + @staticmethod + def _get_canonicalized_resource_query(request): + sorted_queries = list(request.http_request.query.items()) + sorted_queries.sort() + + string_to_sign = '' + for name, value in sorted_queries: + if value is not None: + string_to_sign += '\n' + name.lower() + ':' + unquote(value) + + return string_to_sign + + def _add_authorization_header(self, request, string_to_sign): + try: + signature = sign_string(self.account_key, string_to_sign) + auth_string = 'SharedKey ' + self.account_name + ':' + signature + request.http_request.headers['Authorization'] = auth_string + except Exception as ex: + # Wrap any error that occurred as signing error + # Doing so will clarify/locate the source of problem + raise _wrap_exception(ex, AzureSigningError) + + def on_request(self, request): + string_to_sign = \ + self._get_verb(request) + \ + self._get_headers( + request, + [ + 'content-encoding', 'content-language', 'content-length', + 'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' + ] + ) + \ + self._get_canonicalized_headers(request) + \ + self._get_canonicalized_resource(request) + \ + self._get_canonicalized_resource_query(request) + + self._add_authorization_header(request, string_to_sign) + #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/__init__.py new file mode 100644 index 0000000..5b396cd --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/__init__.py @@ -0,0 +1,5 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io.py
new file mode 100644
index 0000000..93a5c13
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io.py
@@ -0,0 +1,464 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+"""Input/output utilities.
+
+Includes:
+ - i/o-specific constants
+ - i/o-specific exceptions
+ - schema validation
+ - leaf value encoding and decoding
+ - datum reader/writer stuff (?)
+
+Also includes a generic representation for data, which uses the
+following mapping:
+ - Schema records are implemented as dict.
+ - Schema arrays are implemented as list.
+ - Schema maps are implemented as dict.
+ - Schema strings are implemented as unicode.
+ - Schema bytes are implemented as str.
+ - Schema ints are implemented as int.
+ - Schema longs are implemented as long.
+ - Schema floats are implemented as float.
+ - Schema doubles are implemented as float.
+ - Schema booleans are implemented as bool.
+"""
+
+import json
+import logging
+import struct
+import sys
+
+from ..avro import schema
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+STRUCT_FLOAT = struct.Struct('<f')   # little-endian float
+STRUCT_DOUBLE = struct.Struct('<d')  # little-endian double
+
+# ------------------------------------------------------------------------------
+# Exceptions
+
+
+class SchemaResolutionException(schema.AvroException):
+    def __init__(self, fail_msg, writer_schema=None):
+        pretty_writers = json.dumps(json.loads(str(writer_schema)), indent=2)
+        if writer_schema:
+            fail_msg += "\nWriter's Schema: %s" % pretty_writers
+        schema.AvroException.__init__(self, fail_msg)
+
+# ------------------------------------------------------------------------------
+# Decoder
+
+
+class BinaryDecoder(object):
+    """Read leaf values."""
+
+    def __init__(self, reader):
+        """
+        reader is a Python object on which we can call read, seek, and tell.
+        """
+        self._reader = reader
+
+    @property
+    def reader(self):
+        """Reports the reader used by this decoder."""
+        return self._reader
+
+    def read(self, n):
+        """Read n bytes.
+
+        Args:
+          n: Number of bytes to read.
+        Returns:
+          The next n bytes from the input.
+        """
+        assert (n >= 0), n
+        input_bytes = self.reader.read(n)
+        if n > 0 and not input_bytes:
+            raise StopIteration
+        assert (len(input_bytes) == n), input_bytes
+        return input_bytes
+
+    @staticmethod
+    def read_null():
+        """
+        null is written as zero bytes
+        """
+        return None
+
+    def read_boolean(self):
+        """
+        a boolean is written as a single byte
+        whose value is either 0 (false) or 1 (true).
+        """
+        b = ord(self.read(1))
+        if b == 1:
+            return True
+        if b == 0:
+            return False
+        fail_msg = "Invalid value for boolean: %s" % b
+        raise schema.AvroException(fail_msg)
+
+    def read_int(self):
+        """
+        int and long values are written using variable-length, zig-zag coding.
+        """
+        return self.read_long()
+
+    def read_long(self):
+        """
+        int and long values are written using variable-length, zig-zag coding.
+        """
+        b = ord(self.read(1))
+        n = b & 0x7F
+        shift = 7
+        while (b & 0x80) != 0:
+            b = ord(self.read(1))
+            n |= (b & 0x7F) << shift
+            shift += 7
+        datum = (n >> 1) ^ -(n & 1)
+        return datum
+
+    def read_float(self):
+        """
+        A float is written as 4 bytes.
+        The float is converted into a 32-bit integer using a method equivalent to
+        Java's floatToIntBits and then encoded in little-endian format.
+        """
+        return STRUCT_FLOAT.unpack(self.read(4))[0]
+
+    def read_double(self):
+        """
+        A double is written as 8 bytes.
+        The double is converted into a 64-bit integer using a method equivalent to
+        Java's doubleToLongBits and then encoded in little-endian format.
+        """
+        return STRUCT_DOUBLE.unpack(self.read(8))[0]
+
+    def read_bytes(self):
+        """
+        Bytes are encoded as a long followed by that many bytes of data.
+        """
+        nbytes = self.read_long()
+        assert (nbytes >= 0), nbytes
+        return self.read(nbytes)
+
+    def read_utf8(self):
+        """
+        A string is encoded as a long followed by
+        that many bytes of UTF-8 encoded character data.
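+
+        For example, "ab" is encoded as the long 2 (the zig-zag varint byte
+        0x04) followed by the two bytes 0x61 0x62.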
+ """ + input_bytes = self.read_bytes() + if PY3: + try: + return input_bytes.decode('utf-8') + except UnicodeDecodeError as exn: + logger.error('Invalid UTF-8 input bytes: %r', input_bytes) + raise exn + else: + # PY2 + return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable + + def skip_null(self): + pass + + def skip_boolean(self): + self.skip(1) + + def skip_int(self): + self.skip_long() + + def skip_long(self): + b = ord(self.read(1)) + while (b & 0x80) != 0: + b = ord(self.read(1)) + + def skip_float(self): + self.skip(4) + + def skip_double(self): + self.skip(8) + + def skip_bytes(self): + self.skip(self.read_long()) + + def skip_utf8(self): + self.skip_bytes() + + def skip(self, n): + self.reader.seek(self.reader.tell() + n) + + +# ------------------------------------------------------------------------------ +# DatumReader + + +class DatumReader(object): + """Deserialize Avro-encoded data into a Python data structure.""" + + def __init__(self, writer_schema=None): + """ + As defined in the Avro specification, we call the schema encoded + in the data the "writer's schema". + """ + self._writer_schema = writer_schema + + # read/write properties + def set_writer_schema(self, writer_schema): + self._writer_schema = writer_schema + + writer_schema = property(lambda self: self._writer_schema, + set_writer_schema) + + def read(self, decoder): + return self.read_data(self.writer_schema, decoder) + + def read_data(self, writer_schema, decoder): + # function dispatch for reading data based on type of writer's schema + if writer_schema.type == 'null': + result = decoder.read_null() + elif writer_schema.type == 'boolean': + result = decoder.read_boolean() + elif writer_schema.type == 'string': + result = decoder.read_utf8() + elif writer_schema.type == 'int': + result = decoder.read_int() + elif writer_schema.type == 'long': + result = decoder.read_long() + elif writer_schema.type == 'float': + result = decoder.read_float() + elif writer_schema.type == 'double': + result = decoder.read_double() + elif writer_schema.type == 'bytes': + result = decoder.read_bytes() + elif writer_schema.type == 'fixed': + result = self.read_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = self.read_enum(writer_schema, decoder) + elif writer_schema.type == 'array': + result = self.read_array(writer_schema, decoder) + elif writer_schema.type == 'map': + result = self.read_map(writer_schema, decoder) + elif writer_schema.type in ['union', 'error_union']: + result = self.read_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + result = self.read_record(writer_schema, decoder) + else: + fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + def skip_data(self, writer_schema, decoder): + if writer_schema.type == 'null': + result = decoder.skip_null() + elif writer_schema.type == 'boolean': + result = decoder.skip_boolean() + elif writer_schema.type == 'string': + result = decoder.skip_utf8() + elif writer_schema.type == 'int': + result = decoder.skip_int() + elif writer_schema.type == 'long': + result = decoder.skip_long() + elif writer_schema.type == 'float': + result = decoder.skip_float() + elif writer_schema.type == 'double': + result = decoder.skip_double() + elif writer_schema.type == 'bytes': + result = decoder.skip_bytes() + elif writer_schema.type == 'fixed': + result = self.skip_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + 
+            result = self.skip_enum(decoder)
+        elif writer_schema.type == 'array':
+            self.skip_array(writer_schema, decoder)
+            result = None
+        elif writer_schema.type == 'map':
+            self.skip_map(writer_schema, decoder)
+            result = None
+        elif writer_schema.type in ['union', 'error_union']:
+            result = self.skip_union(writer_schema, decoder)
+        elif writer_schema.type in ['record', 'error', 'request']:
+            self.skip_record(writer_schema, decoder)
+            result = None
+        else:
+            fail_msg = "Unknown schema type: %s" % writer_schema.type
+            raise schema.AvroException(fail_msg)
+        return result
+
+    @staticmethod
+    def read_fixed(writer_schema, decoder):
+        """
+        Fixed instances are encoded using the number of bytes declared
+        in the schema.
+        """
+        return decoder.read(writer_schema.size)
+
+    @staticmethod
+    def skip_fixed(writer_schema, decoder):
+        return decoder.skip(writer_schema.size)
+
+    @staticmethod
+    def read_enum(writer_schema, decoder):
+        """
+        An enum is encoded by an int, representing the zero-based position
+        of the symbol in the schema.
+        """
+        # read data
+        index_of_symbol = decoder.read_int()
+        if index_of_symbol >= len(writer_schema.symbols):
+            fail_msg = "Can't access enum index %d for enum with %d symbols" \
+                       % (index_of_symbol, len(writer_schema.symbols))
+            raise SchemaResolutionException(fail_msg, writer_schema)
+        read_symbol = writer_schema.symbols[index_of_symbol]
+        return read_symbol
+
+    @staticmethod
+    def skip_enum(decoder):
+        return decoder.skip_int()
+
+    def read_array(self, writer_schema, decoder):
+        """
+        Arrays are encoded as a series of blocks.
+
+        Each block consists of a long count value,
+        followed by that many array items.
+        A block with count zero indicates the end of the array.
+        Each item is encoded per the array's item schema.
+
+        If a block's count is negative,
+        then the count is followed immediately by a long block size,
+        indicating the number of bytes in the block.
+        The actual count in this case
+        is the absolute value of the count written.
+        """
+        read_items = []
+        block_count = decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_count = -block_count
+                decoder.read_long()
+            for _ in range(block_count):
+                read_items.append(self.read_data(writer_schema.items, decoder))
+            block_count = decoder.read_long()
+        return read_items
+
+    def skip_array(self, writer_schema, decoder):
+        block_count = decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_size = decoder.read_long()
+                decoder.skip(block_size)
+            else:
+                for _ in range(block_count):
+                    self.skip_data(writer_schema.items, decoder)
+            block_count = decoder.read_long()
+
+    def read_map(self, writer_schema, decoder):
+        """
+        Maps are encoded as a series of blocks.
+
+        Each block consists of a long count value,
+        followed by that many key/value pairs.
+        A block with count zero indicates the end of the map.
+        Each item is encoded per the map's value schema.
+
+        If a block's count is negative,
+        then the count is followed immediately by a long block size,
+        indicating the number of bytes in the block.
+        The actual count in this case
+        is the absolute value of the count written.
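+
+        For example, a single-entry map is typically written as the long 1,
+        one encoded key/value pair, and then the long 0 terminator.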
+ """ + read_items = {} + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + decoder.read_long() + for _ in range(block_count): + key = decoder.read_utf8() + read_items[key] = self.read_data(writer_schema.values, decoder) + block_count = decoder.read_long() + return read_items + + def skip_map(self, writer_schema, decoder): + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = decoder.read_long() + decoder.skip(block_size) + else: + for _ in range(block_count): + decoder.skip_utf8() + self.skip_data(writer_schema.values, decoder) + block_count = decoder.read_long() + + def read_union(self, writer_schema, decoder): + """ + A union is encoded by first writing a long value indicating + the zero-based position within the union of the schema of its value. + The value is then encoded per the indicated schema within the union. + """ + # schema resolution + index_of_schema = int(decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + selected_writer_schema = writer_schema.schemas[index_of_schema] + + # read data + return self.read_data(selected_writer_schema, decoder) + + def skip_union(self, writer_schema, decoder): + index_of_schema = int(decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + return self.skip_data(writer_schema.schemas[index_of_schema], decoder) + + def read_record(self, writer_schema, decoder): + """ + A record is encoded by encoding the values of its fields + in the order that they are declared. In other words, a record + is encoded as just the concatenation of the encodings of its fields. + Field values are encoded per their schema. + + Schema Resolution: + * the ordering of fields may be different: fields are matched by name. + * schemas for fields with the same name in both records are resolved + recursively. + * if the writer's record contains a field with a name not present in the + reader's record, the writer's value for that field is ignored. + * if the reader's record schema has a field that contains a default value, + and writer's schema does not have a field with the same name, then the + reader should use the default value from its field. + * if the reader's record schema has a field with no default value, and + writer's schema does not have a field with the same name, then the + field's value is unset. 
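+
+        For example, a record with fields declared as (name, count) is encoded
+        as encode(name) immediately followed by encode(count); no field names
+        or delimiters appear in the encoding.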
+ """ + # schema resolution + read_record = {} + for field in writer_schema.fields: + field_val = self.read_data(field.type, decoder) + read_record[field.name] = field_val + return read_record + + def skip_record(self, writer_schema, decoder): + for field in writer_schema.fields: + self.skip_data(field.type, decoder) + + +# ------------------------------------------------------------------------------ + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io_async.py new file mode 100644 index 0000000..e981216 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/avro_io_async.py @@ -0,0 +1,448 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Input/output utilities. + +Includes: + - i/o-specific constants + - i/o-specific exceptions + - schema validation + - leaf value encoding and decoding + - datum reader/writer stuff (?) + +Also includes a generic representation for data, which uses the +following mapping: + - Schema records are implemented as dict. + - Schema arrays are implemented as list. + - Schema maps are implemented as dict. + - Schema strings are implemented as unicode. + - Schema bytes are implemented as str. + - Schema ints are implemented as int. + - Schema longs are implemented as long. + - Schema floats are implemented as float. + - Schema doubles are implemented as float. + - Schema booleans are implemented as bool. +""" + +import logging +import sys + +from ..avro import schema + +from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Decoder + + +class AsyncBinaryDecoder(object): + """Read leaf values.""" + + def __init__(self, reader): + """ + reader is a Python object on which we can call read, seek, and tell. + """ + self._reader = reader + + @property + def reader(self): + """Reports the reader used by this decoder.""" + return self._reader + + async def read(self, n): + """Read n bytes. + + Args: + n: Number of bytes to read. + Returns: + The next n bytes from the input. + """ + assert (n >= 0), n + input_bytes = await self.reader.read(n) + if n > 0 and not input_bytes: + raise StopAsyncIteration + assert (len(input_bytes) == n), input_bytes + return input_bytes + + @staticmethod + def read_null(): + """ + null is written as zero bytes + """ + return None + + async def read_boolean(self): + """ + a boolean is written as a single byte + whose value is either 0 (false) or 1 (true). + """ + b = ord(await self.read(1)) + if b == 1: + return True + if b == 0: + return False + fail_msg = "Invalid value for boolean: %s" % b + raise schema.AvroException(fail_msg) + + async def read_int(self): + """ + int and long values are written using variable-length, zig-zag coding. + """ + return await self.read_long() + + async def read_long(self): + """ + int and long values are written using variable-length, zig-zag coding. 
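+
+        For example, under zig-zag coding 0 encodes to 0x00, -1 to 0x01,
+        1 to 0x02, and 64 to the two bytes 0x80 0x01 (continuation bit set
+        on the first byte).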
+ """ + b = ord(await self.read(1)) + n = b & 0x7F + shift = 7 + while (b & 0x80) != 0: + b = ord(await self.read(1)) + n |= (b & 0x7F) << shift + shift += 7 + datum = (n >> 1) ^ -(n & 1) + return datum + + async def read_float(self): + """ + A float is written as 4 bytes. + The float is converted into a 32-bit integer using a method equivalent to + Java's floatToIntBits and then encoded in little-endian format. + """ + return STRUCT_FLOAT.unpack(await self.read(4))[0] + + async def read_double(self): + """ + A double is written as 8 bytes. + The double is converted into a 64-bit integer using a method equivalent to + Java's doubleToLongBits and then encoded in little-endian format. + """ + return STRUCT_DOUBLE.unpack(await self.read(8))[0] + + async def read_bytes(self): + """ + Bytes are encoded as a long followed by that many bytes of data. + """ + nbytes = await self.read_long() + assert (nbytes >= 0), nbytes + return await self.read(nbytes) + + async def read_utf8(self): + """ + A string is encoded as a long followed by + that many bytes of UTF-8 encoded character data. + """ + input_bytes = await self.read_bytes() + if PY3: + try: + return input_bytes.decode('utf-8') + except UnicodeDecodeError as exn: + logger.error('Invalid UTF-8 input bytes: %r', input_bytes) + raise exn + else: + # PY2 + return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable + + def skip_null(self): + pass + + async def skip_boolean(self): + await self.skip(1) + + async def skip_int(self): + await self.skip_long() + + async def skip_long(self): + b = ord(await self.read(1)) + while (b & 0x80) != 0: + b = ord(await self.read(1)) + + async def skip_float(self): + await self.skip(4) + + async def skip_double(self): + await self.skip(8) + + async def skip_bytes(self): + await self.skip(await self.read_long()) + + async def skip_utf8(self): + await self.skip_bytes() + + async def skip(self, n): + await self.reader.seek(await self.reader.tell() + n) + + +# ------------------------------------------------------------------------------ +# DatumReader + + +class AsyncDatumReader(object): + """Deserialize Avro-encoded data into a Python data structure.""" + + def __init__(self, writer_schema=None): + """ + As defined in the Avro specification, we call the schema encoded + in the data the "writer's schema", and the schema expected by the + reader the "reader's schema". 
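+
+        (This implementation decodes purely against the writer's schema; no
+        separate reader schema is supplied for resolution.)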
+ """ + self._writer_schema = writer_schema + + # read/write properties + def set_writer_schema(self, writer_schema): + self._writer_schema = writer_schema + + writer_schema = property(lambda self: self._writer_schema, + set_writer_schema) + + async def read(self, decoder): + return await self.read_data(self.writer_schema, decoder) + + async def read_data(self, writer_schema, decoder): + # function dispatch for reading data based on type of writer's schema + if writer_schema.type == 'null': + result = decoder.read_null() + elif writer_schema.type == 'boolean': + result = await decoder.read_boolean() + elif writer_schema.type == 'string': + result = await decoder.read_utf8() + elif writer_schema.type == 'int': + result = await decoder.read_int() + elif writer_schema.type == 'long': + result = await decoder.read_long() + elif writer_schema.type == 'float': + result = await decoder.read_float() + elif writer_schema.type == 'double': + result = await decoder.read_double() + elif writer_schema.type == 'bytes': + result = await decoder.read_bytes() + elif writer_schema.type == 'fixed': + result = await self.read_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = await self.read_enum(writer_schema, decoder) + elif writer_schema.type == 'array': + result = await self.read_array(writer_schema, decoder) + elif writer_schema.type == 'map': + result = await self.read_map(writer_schema, decoder) + elif writer_schema.type in ['union', 'error_union']: + result = await self.read_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + result = await self.read_record(writer_schema, decoder) + else: + fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + async def skip_data(self, writer_schema, decoder): + if writer_schema.type == 'null': + result = decoder.skip_null() + elif writer_schema.type == 'boolean': + result = await decoder.skip_boolean() + elif writer_schema.type == 'string': + result = await decoder.skip_utf8() + elif writer_schema.type == 'int': + result = await decoder.skip_int() + elif writer_schema.type == 'long': + result = await decoder.skip_long() + elif writer_schema.type == 'float': + result = await decoder.skip_float() + elif writer_schema.type == 'double': + result = await decoder.skip_double() + elif writer_schema.type == 'bytes': + result = await decoder.skip_bytes() + elif writer_schema.type == 'fixed': + result = await self.skip_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = await self.skip_enum(decoder) + elif writer_schema.type == 'array': + await self.skip_array(writer_schema, decoder) + result = None + elif writer_schema.type == 'map': + await self.skip_map(writer_schema, decoder) + result = None + elif writer_schema.type in ['union', 'error_union']: + result = await self.skip_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + await self.skip_record(writer_schema, decoder) + result = None + else: + fail_msg = "Unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + @staticmethod + async def read_fixed(writer_schema, decoder): + """ + Fixed instances are encoded using the number of bytes declared + in the schema. 
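+
+        For example, a fixed declared with "size": 16 is read back as exactly
+        16 raw bytes, with no length prefix.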
+        """
+        return await decoder.read(writer_schema.size)
+
+    @staticmethod
+    async def skip_fixed(writer_schema, decoder):
+        return await decoder.skip(writer_schema.size)
+
+    @staticmethod
+    async def read_enum(writer_schema, decoder):
+        """
+        An enum is encoded by an int, representing the zero-based position
+        of the symbol in the schema.
+        """
+        # read data
+        index_of_symbol = await decoder.read_int()
+        if index_of_symbol >= len(writer_schema.symbols):
+            fail_msg = "Can't access enum index %d for enum with %d symbols" \
+                       % (index_of_symbol, len(writer_schema.symbols))
+            raise SchemaResolutionException(fail_msg, writer_schema)
+        read_symbol = writer_schema.symbols[index_of_symbol]
+        return read_symbol
+
+    @staticmethod
+    async def skip_enum(decoder):
+        return await decoder.skip_int()
+
+    async def read_array(self, writer_schema, decoder):
+        """
+        Arrays are encoded as a series of blocks.
+
+        Each block consists of a long count value,
+        followed by that many array items.
+        A block with count zero indicates the end of the array.
+        Each item is encoded per the array's item schema.
+
+        If a block's count is negative,
+        then the count is followed immediately by a long block size,
+        indicating the number of bytes in the block.
+        The actual count in this case
+        is the absolute value of the count written.
+        """
+        read_items = []
+        block_count = await decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_count = -block_count
+                await decoder.read_long()
+            for _ in range(block_count):
+                read_items.append(await self.read_data(writer_schema.items, decoder))
+            block_count = await decoder.read_long()
+        return read_items
+
+    async def skip_array(self, writer_schema, decoder):
+        block_count = await decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_size = await decoder.read_long()
+                await decoder.skip(block_size)
+            else:
+                for _ in range(block_count):
+                    await self.skip_data(writer_schema.items, decoder)
+            block_count = await decoder.read_long()
+
+    async def read_map(self, writer_schema, decoder):
+        """
+        Maps are encoded as a series of blocks.
+
+        Each block consists of a long count value,
+        followed by that many key/value pairs.
+        A block with count zero indicates the end of the map.
+        Each item is encoded per the map's value schema.
+
+        If a block's count is negative,
+        then the count is followed immediately by a long block size,
+        indicating the number of bytes in the block.
+        The actual count in this case
+        is the absolute value of the count written.
+        """
+        read_items = {}
+        block_count = await decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_count = -block_count
+                await decoder.read_long()
+            for _ in range(block_count):
+                key = await decoder.read_utf8()
+                read_items[key] = await self.read_data(writer_schema.values, decoder)
+            block_count = await decoder.read_long()
+        return read_items
+
+    async def skip_map(self, writer_schema, decoder):
+        block_count = await decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_size = await decoder.read_long()
+                await decoder.skip(block_size)
+            else:
+                for _ in range(block_count):
+                    await decoder.skip_utf8()
+                    await self.skip_data(writer_schema.values, decoder)
+            block_count = await decoder.read_long()
+
+    async def read_union(self, writer_schema, decoder):
+        """
+        A union is encoded by first writing a long value indicating
+        the zero-based position within the union of the schema of its value.
+        The value is then encoded per the indicated schema within the union.
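+
+        For example, against the union ["null", "string"], a null value is
+        encoded as just the long 0, while a string is encoded as the long 1
+        followed by the encoded string.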
+ """ + # schema resolution + index_of_schema = int(await decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + selected_writer_schema = writer_schema.schemas[index_of_schema] + + # read data + return await self.read_data(selected_writer_schema, decoder) + + async def skip_union(self, writer_schema, decoder): + index_of_schema = int(await decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + return await self.skip_data(writer_schema.schemas[index_of_schema], decoder) + + async def read_record(self, writer_schema, decoder): + """ + A record is encoded by encoding the values of its fields + in the order that they are declared. In other words, a record + is encoded as just the concatenation of the encodings of its fields. + Field values are encoded per their schema. + + Schema Resolution: + * the ordering of fields may be different: fields are matched by name. + * schemas for fields with the same name in both records are resolved + recursively. + * if the writer's record contains a field with a name not present in the + reader's record, the writer's value for that field is ignored. + * if the reader's record schema has a field that contains a default value, + and writer's schema does not have a field with the same name, then the + reader should use the default value from its field. + * if the reader's record schema has a field with no default value, and + writer's schema does not have a field with the same name, then the + field's value is unset. + """ + # schema resolution + read_record = {} + for field in writer_schema.fields: + field_val = await self.read_data(field.type, decoder) + read_record[field.name] = field_val + return read_record + + async def skip_record(self, writer_schema, decoder): + for field in writer_schema.fields: + await self.skip_data(field.type, decoder) + + +# ------------------------------------------------------------------------------ + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile.py new file mode 100644 index 0000000..df06fe0 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile.py @@ -0,0 +1,266 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+
+"""Read/Write Avro File Object Containers."""
+
+import io
+import logging
+import sys
+import zlib
+
+from ..avro import avro_io
+from ..avro import schema
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+# Version of the container file:
+VERSION = 1
+
+if PY3:
+    MAGIC = b'Obj' + bytes([VERSION])
+    MAGIC_SIZE = len(MAGIC)
+else:
+    MAGIC = 'Obj' + chr(VERSION)
+    MAGIC_SIZE = len(MAGIC)
+
+# Size of the synchronization marker, in number of bytes:
+SYNC_SIZE = 16
+
+# Schema of the container header:
+META_SCHEMA = schema.parse("""
+{
+  "type": "record", "name": "org.apache.avro.file.Header",
+  "fields": [{
+    "name": "magic",
+    "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d}
+  }, {
+    "name": "meta",
+    "type": {"type": "map", "values": "bytes"}
+  }, {
+    "name": "sync",
+    "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d}
+  }]
+}
+""" % {
+    'magic_size': MAGIC_SIZE,
+    'sync_size': SYNC_SIZE,
+})
+
+# Codecs supported by container files:
+VALID_CODECS = frozenset(['null', 'deflate'])
+
+# Metadata key associated to the schema:
+SCHEMA_KEY = "avro.schema"
+
+
+# ------------------------------------------------------------------------------
+# Exceptions
+
+
+class DataFileException(schema.AvroException):
+    """Problem reading or writing file object containers."""
+
+# ------------------------------------------------------------------------------
+
+
+class DataFileReader(object):  # pylint: disable=too-many-instance-attributes
+    """Read files written by DataFileWriter."""
+
+    def __init__(self, reader, datum_reader, **kwargs):
+        """Initializes a new data file reader.
+
+        Args:
+          reader: Open file to read from.
+          datum_reader: Avro datum reader.
+        """
+        self._reader = reader
+        self._raw_decoder = avro_io.BinaryDecoder(reader)
+        self._header_reader = kwargs.pop('header_reader', None)
+        self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader)
+        self._datum_decoder = None  # Maybe reset at every block.
+        self._datum_reader = datum_reader
+
+        # self._reader may hold only partial content (without the header);
+        # seek(0, 0) makes sure we read the (partial) content from the beginning.
+        self._reader.seek(0, 0)
+
+        # read the header: magic, meta, sync
+        self._read_header()
+
+        # ensure codec is valid
+        avro_codec_raw = self.get_meta('avro.codec')
+        if avro_codec_raw is None:
+            self.codec = "null"
+        else:
+            self.codec = avro_codec_raw.decode('utf-8')
+        if self.codec not in VALID_CODECS:
+            raise DataFileException('Unknown codec: %s.' % self.codec)
+
+        # get ready to read
+        self._block_count = 0
+
+        # object_position supports resuming a future read from the current
+        # position, so there is no need to re-download the avro file from the beginning.
+        if hasattr(self._reader, 'object_position'):
+            self.reader.track_object_position()
+
+        self._cur_object_index = 0
+        # A header_reader means self._reader holds only partial content without a
+        # block header, so we reuse the block count stored from the last read.
+        # ChangeFeed also only uses codec==null, so _raw_decoder is sufficient.
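+        # (Container layout, for reference: magic bytes, a metadata map that
+        # includes 'avro.schema' and optionally 'avro.codec', a 16-byte sync
+        # marker, and then data blocks (object count, byte length, data)
+        # separated by the same sync marker.)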
+ if self._header_reader is not None: + self._datum_decoder = self._raw_decoder + + self.datum_reader.writer_schema = ( + schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) + + def __enter__(self): + return self + + def __exit__(self, data_type, value, traceback): + # Perform a close if there's no exception + if data_type is None: + self.close() + + def __iter__(self): + return self + + # read-only properties + @property + def reader(self): + return self._reader + + @property + def raw_decoder(self): + return self._raw_decoder + + @property + def datum_decoder(self): + return self._datum_decoder + + @property + def datum_reader(self): + return self._datum_reader + + @property + def sync_marker(self): + return self._sync_marker + + @property + def meta(self): + return self._meta + + # read/write properties + @property + def block_count(self): + return self._block_count + + def get_meta(self, key): + """Reports the value of a given metadata key. + + Args: + key: Metadata key (string) to report the value of. + Returns: + Value associated to the metadata key, as bytes. + """ + return self._meta.get(key) + + def _read_header(self): + header_reader = self._header_reader if self._header_reader else self._reader + header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder + + # seek to the beginning of the file to get magic block + header_reader.seek(0, 0) + + # read header into a dict + header = self.datum_reader.read_data(META_SCHEMA, header_decoder) + + # check magic number + if header.get('magic') != MAGIC: + fail_msg = "Not an Avro data file: %s doesn't match %s." \ + % (header.get('magic'), MAGIC) + raise schema.AvroException(fail_msg) + + # set metadata + self._meta = header['meta'] + + # set sync marker + self._sync_marker = header['sync'] + + def _read_block_header(self): + self._block_count = self.raw_decoder.read_long() + if self.codec == "null": + # Skip a long; we don't need to use the length. + self.raw_decoder.skip_long() + self._datum_decoder = self._raw_decoder + elif self.codec == 'deflate': + # Compressed data is stored as (length, data), which + # corresponds to how the "bytes" type is encoded. + data = self.raw_decoder.read_bytes() + # -15 is the log of the window size; negative indicates + # "raw" (no zlib headers) decompression. See zlib.h. + uncompressed = zlib.decompress(data, -15) + self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed)) + else: + raise DataFileException("Unknown codec: %r" % self.codec) + + def _skip_sync(self): + """ + Read the length of the sync marker; if it matches the sync marker, + return True. Otherwise, seek back to where we started and return False. + """ + proposed_sync_marker = self.reader.read(SYNC_SIZE) + if SYNC_SIZE > 0 and not proposed_sync_marker: + raise StopIteration + if proposed_sync_marker != self.sync_marker: + self.reader.seek(-SYNC_SIZE, 1) + + def __next__(self): + """Return the next datum in the file.""" + if self.block_count == 0: + self._skip_sync() + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro file with this attr. 
+ if hasattr(self._reader, 'object_position'): + self.reader.track_object_position() + self._cur_object_index = 0 + + self._read_block_header() + + datum = self.datum_reader.read(self.datum_decoder) + self._block_count -= 1 + self._cur_object_index += 1 + + # object_position is to support reading from current position in the future read, + # This will track the index of the next item to be read. + # This will also track the offset before the next sync marker. + if hasattr(self._reader, 'object_position'): + if self.block_count == 0: + # the next event to be read is at index 0 in the new chunk of blocks, + self.reader.track_object_position() + self.reader.set_object_index(0) + else: + self.reader.set_object_index(self._cur_object_index) + + return datum + + # PY2 + def next(self): + return self.__next__() + + def close(self): + """Close this reader.""" + self.reader.close() + + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile_async.py new file mode 100644 index 0000000..1e9d018 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/datafile_async.py @@ -0,0 +1,215 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Read/Write Avro File Object Containers.""" + +import logging +import sys + +from ..avro import avro_io_async +from ..avro import schema +from .datafile import DataFileException +from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY + + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Codecs supported by container files: +VALID_CODECS = frozenset(['null']) + + +class AsyncDataFileReader(object): # pylint: disable=too-many-instance-attributes + """Read files written by DataFileWriter.""" + + def __init__(self, reader, datum_reader, **kwargs): + """Initializes a new data file reader. + + Args: + reader: Open file to read from. + datum_reader: Avro datum reader. + """ + self._reader = reader + self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader) + self._header_reader = kwargs.pop('header_reader', None) + self._header_decoder = None if self._header_reader is None else \ + avro_io_async.AsyncBinaryDecoder(self._header_reader) + self._datum_decoder = None # Maybe reset at every block. + self._datum_reader = datum_reader + self.codec = "null" + self._block_count = 0 + self._cur_object_index = 0 + self._meta = None + self._sync_marker = None + + async def init(self): + # In case self._reader only has partial content(without header). + # seek(0, 0) to make sure read the (partial)content from beginning. + await self._reader.seek(0, 0) + + # read the header: magic, meta, sync + await self._read_header() + + # ensure codec is valid + avro_codec_raw = self.get_meta('avro.codec') + if avro_codec_raw is None: + self.codec = "null" + else: + self.codec = avro_codec_raw.decode('utf-8') + if self.codec not in VALID_CODECS: + raise DataFileException('Unknown codec: %s.' 
% self.codec) + + # get ready to read + self._block_count = 0 + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro. + if hasattr(self._reader, 'object_position'): + self.reader.track_object_position() + + # header_reader indicates reader only has partial content. The reader doesn't have block header, + # so we read use the block count stored last time. + # Also ChangeFeed only has codec==null, so use _raw_decoder is good. + if self._header_reader is not None: + self._datum_decoder = self._raw_decoder + self.datum_reader.writer_schema = ( + schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) + return self + + async def __aenter__(self): + return self + + async def __aexit__(self, data_type, value, traceback): + # Perform a close if there's no exception + if data_type is None: + self.close() + + def __aiter__(self): + return self + + # read-only properties + @property + def reader(self): + return self._reader + + @property + def raw_decoder(self): + return self._raw_decoder + + @property + def datum_decoder(self): + return self._datum_decoder + + @property + def datum_reader(self): + return self._datum_reader + + @property + def sync_marker(self): + return self._sync_marker + + @property + def meta(self): + return self._meta + + # read/write properties + @property + def block_count(self): + return self._block_count + + def get_meta(self, key): + """Reports the value of a given metadata key. + + Args: + key: Metadata key (string) to report the value of. + Returns: + Value associated to the metadata key, as bytes. + """ + return self._meta.get(key) + + async def _read_header(self): + header_reader = self._header_reader if self._header_reader else self._reader + header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder + + # seek to the beginning of the file to get magic block + await header_reader.seek(0, 0) + + # read header into a dict + header = await self.datum_reader.read_data(META_SCHEMA, header_decoder) + + # check magic number + if header.get('magic') != MAGIC: + fail_msg = "Not an Avro data file: %s doesn't match %s." \ + % (header.get('magic'), MAGIC) + raise schema.AvroException(fail_msg) + + # set metadata + self._meta = header['meta'] + + # set sync marker + self._sync_marker = header['sync'] + + async def _read_block_header(self): + self._block_count = await self.raw_decoder.read_long() + if self.codec == "null": + # Skip a long; we don't need to use the length. + await self.raw_decoder.skip_long() + self._datum_decoder = self._raw_decoder + else: + raise DataFileException("Unknown codec: %r" % self.codec) + + async def _skip_sync(self): + """ + Read the length of the sync marker; if it matches the sync marker, + return True. Otherwise, seek back to where we started and return False. + """ + proposed_sync_marker = await self.reader.read(SYNC_SIZE) + if SYNC_SIZE > 0 and not proposed_sync_marker: + raise StopAsyncIteration + if proposed_sync_marker != self.sync_marker: + await self.reader.seek(-SYNC_SIZE, 1) + + async def __anext__(self): + """Return the next datum in the file.""" + if self.block_count == 0: + await self._skip_sync() + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro file with this attr. 
+ if hasattr(self._reader, 'object_position'): + await self.reader.track_object_position() + self._cur_object_index = 0 + + await self._read_block_header() + + datum = await self.datum_reader.read(self.datum_decoder) + self._block_count -= 1 + self._cur_object_index += 1 + + # object_position is to support reading from current position in the future read, + # This will track the index of the next item to be read. + # This will also track the offset before the next sync marker. + if hasattr(self._reader, 'object_position'): + if self.block_count == 0: + # the next event to be read is at index 0 in the new chunk of blocks, + await self.reader.track_object_position() + await self.reader.set_object_index(0) + else: + await self.reader.set_object_index(self._cur_object_index) + + return datum + + def close(self): + """Close this reader.""" + self.reader.close() + + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/schema.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/schema.py new file mode 100644 index 0000000..ffe2853 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/avro/schema.py @@ -0,0 +1,1221 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines + +"""Representation of Avro schemas. + +A schema may be one of: + - A record, mapping field names to field value data; + - An error, equivalent to a record; + - An enum, containing one of a small set of symbols; + - An array of values, all of the same schema; + - A map containing string/value pairs, each of a declared schema; + - A union of other schemas; + - A fixed sized binary object; + - A unicode string; + - A sequence of bytes; + - A 32-bit signed int; + - A 64-bit signed long; + - A 32-bit floating-point float; + - A 64-bit floating-point double; + - A boolean; + - Null. +""" + +import abc +import json +import logging +import re +import sys +from six import with_metaclass + +PY2 = sys.version_info[0] == 2 + +if PY2: + _str = unicode # pylint: disable=undefined-variable +else: + _str = str + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Log level more verbose than DEBUG=10, INFO=20, etc. 
+DEBUG_VERBOSE = 5 + +NULL = 'null' +BOOLEAN = 'boolean' +STRING = 'string' +BYTES = 'bytes' +INT = 'int' +LONG = 'long' +FLOAT = 'float' +DOUBLE = 'double' +FIXED = 'fixed' +ENUM = 'enum' +RECORD = 'record' +ERROR = 'error' +ARRAY = 'array' +MAP = 'map' +UNION = 'union' + +# Request and error unions are part of Avro protocols: +REQUEST = 'request' +ERROR_UNION = 'error_union' + +PRIMITIVE_TYPES = frozenset([ + NULL, + BOOLEAN, + STRING, + BYTES, + INT, + LONG, + FLOAT, + DOUBLE, +]) + +NAMED_TYPES = frozenset([ + FIXED, + ENUM, + RECORD, + ERROR, +]) + +VALID_TYPES = frozenset.union( + PRIMITIVE_TYPES, + NAMED_TYPES, + [ + ARRAY, + MAP, + UNION, + REQUEST, + ERROR_UNION, + ], +) + +SCHEMA_RESERVED_PROPS = frozenset([ + 'type', + 'name', + 'namespace', + 'fields', # Record + 'items', # Array + 'size', # Fixed + 'symbols', # Enum + 'values', # Map + 'doc', +]) + +FIELD_RESERVED_PROPS = frozenset([ + 'default', + 'name', + 'doc', + 'order', + 'type', +]) + +VALID_FIELD_SORT_ORDERS = frozenset([ + 'ascending', + 'descending', + 'ignore', +]) + + +# ------------------------------------------------------------------------------ +# Exceptions + + +class Error(Exception): + """Base class for errors in this module.""" + + +class AvroException(Error): + """Generic Avro schema error.""" + + +class SchemaParseException(AvroException): + """Error while parsing a JSON schema descriptor.""" + + +class Schema(with_metaclass(abc.ABCMeta, object)): + """Abstract base class for all Schema classes.""" + + def __init__(self, data_type, other_props=None): + """Initializes a new schema object. + + Args: + data_type: Type of the schema to initialize. + other_props: Optional dictionary of additional properties. + """ + if data_type not in VALID_TYPES: + raise SchemaParseException('%r is not a valid Avro type.' % data_type) + + # All properties of this schema, as a map: property name -> property value + self._props = {} + + self._props['type'] = data_type + self._type = data_type + + if other_props: + self._props.update(other_props) + + @property + def namespace(self): + """Returns: the namespace this schema belongs to, if any, or None.""" + return self._props.get('namespace', None) + + @property + def type(self): + """Returns: the type of this schema.""" + return self._type + + @property + def doc(self): + """Returns: the documentation associated to this schema, if any, or None.""" + return self._props.get('doc', None) + + @property + def props(self): + """Reports all the properties of this schema. + + Includes all properties, reserved and non reserved. + JSON properties of this schema are directly generated from this dict. + + Returns: + A dictionary of properties associated to this schema. + """ + return self._props + + @property + def other_props(self): + """Returns: the dictionary of non-reserved properties.""" + return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS)) + + def __str__(self): + """Returns: the JSON representation of this schema.""" + return json.dumps(self.to_json(names=None)) + + @abc.abstractmethod + def to_json(self, names): + """Converts the schema object into its AVRO specification representation. + + Schema types that have names (records, enums, and fixed) must + be aware of not re-defining schemas that are already listed + in the parameter names. 
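+
+        For example, the second time a named type such as a record appears,
+        it can be serialized as just its full name rather than re-defined
+        inline.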
+ """ + raise Exception('Cannot run abstract method.') + + +# ------------------------------------------------------------------------------ + + +_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*') + +_RE_FULL_NAME = re.compile( + r'^' + r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace + r'([A-Za-z_][A-Za-z0-9_]*)' # name + r'$' +) + + +class Name(object): + """Representation of an Avro name.""" + + def __init__(self, name, namespace=None): + """Parses an Avro name. + + Args: + name: Avro name to parse (relative or absolute). + namespace: Optional explicit namespace if the name is relative. + """ + # Normalize: namespace is always defined as a string, possibly empty. + if namespace is None: + namespace = '' + + if '.' in name: + # name is absolute, namespace is ignored: + self._fullname = name + + match = _RE_FULL_NAME.match(self._fullname) + if match is None: + raise SchemaParseException( + 'Invalid absolute schema name: %r.' % self._fullname) + + self._name = match.group(1) + self._namespace = self._fullname[:-(len(self._name) + 1)] + + else: + # name is relative, combine with explicit namespace: + self._name = name + self._namespace = namespace + self._fullname = (self._name + if (not self._namespace) else + '%s.%s' % (self._namespace, self._name)) + + # Validate the fullname: + if _RE_FULL_NAME.match(self._fullname) is None: + raise SchemaParseException( + 'Invalid schema name %r infered from name %r and namespace %r.' + % (self._fullname, self._name, self._namespace)) + + def __eq__(self, other): + if not isinstance(other, Name): + return NotImplemented + return self.fullname == other.fullname + + @property + def simple_name(self): + """Returns: the simple name part of this name.""" + return self._name + + @property + def namespace(self): + """Returns: this name's namespace, possible the empty string.""" + return self._namespace + + @property + def fullname(self): + """Returns: the full name.""" + return self._fullname + + +# ------------------------------------------------------------------------------ + + +class Names(object): + """Tracks Avro named schemas and default namespace during parsing.""" + + def __init__(self, default_namespace=None, names=None): + """Initializes a new name tracker. + + Args: + default_namespace: Optional default namespace. + names: Optional initial mapping of known named schemas. + """ + if names is None: + names = {} + self._names = names + self._default_namespace = default_namespace + + @property + def names(self): + """Returns: the mapping of known named schemas.""" + return self._names + + @property + def default_namespace(self): + """Returns: the default namespace, if any, or None.""" + return self._default_namespace + + def new_with_default_namespace(self, namespace): + """Creates a new name tracker from this tracker, but with a new default ns. + + Args: + namespace: New default namespace to use. + Returns: + New name tracker with the specified default namespace. + """ + return Names(names=self._names, default_namespace=namespace) + + def get_name(self, name, namespace=None): + """Resolves the Avro name according to this name tracker's state. + + Args: + name: Name to resolve (absolute or relative). + namespace: Optional explicit namespace. + Returns: + The specified name, resolved according to this tracker. + """ + if namespace is None: + namespace = self._default_namespace + return Name(name=name, namespace=namespace) + + def get_schema(self, name, namespace=None): + """Resolves an Avro schema by name. 
+ + Args: + name: Name (relative or absolute) of the Avro schema to look up. + namespace: Optional explicit namespace. + Returns: + The schema with the specified name, if any, or None. + """ + avro_name = self.get_name(name=name, namespace=namespace) + return self._names.get(avro_name.fullname, None) + + def prune_namespace(self, properties): + """given a properties, return properties with namespace removed if + it matches the own default namespace + """ + if self.default_namespace is None: + # I have no default -- no change + return properties + if 'namespace' not in properties: + # he has no namespace - no change + return properties + if properties['namespace'] != self.default_namespace: + # we're different - leave his stuff alone + return properties + # we each have a namespace and it's redundant. delete his. + prunable = properties.copy() + del prunable['namespace'] + return prunable + + def register(self, schema): + """Registers a new named schema in this tracker. + + Args: + schema: Named Avro schema to register in this tracker. + """ + if schema.fullname in VALID_TYPES: + raise SchemaParseException( + '%s is a reserved type name.' % schema.fullname) + if schema.fullname in self.names: + raise SchemaParseException( + 'Avro name %r already exists.' % schema.fullname) + + logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname) + self._names[schema.fullname] = schema + + +# ------------------------------------------------------------------------------ + + +class NamedSchema(Schema): + """Abstract base class for named schemas. + + Named schemas are enumerated in NAMED_TYPES. + """ + + def __init__( + self, + data_type, + name=None, + namespace=None, + names=None, + other_props=None, + ): + """Initializes a new named schema object. + + Args: + data_type: Type of the named schema. + name: Name (absolute or relative) of the schema. + namespace: Optional explicit namespace if name is relative. + names: Tracker to resolve and register Avro names. + other_props: Optional map of additional properties of the schema. + """ + assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type) + self._avro_name = names.get_name(name=name, namespace=namespace) + + super(NamedSchema, self).__init__(data_type, other_props) + + names.register(self) + + self._props['name'] = self.name + if self.namespace: + self._props['namespace'] = self.namespace + + @property + def avro_name(self): + """Returns: the Name object describing this schema's name.""" + return self._avro_name + + @property + def name(self): + return self._avro_name.simple_name + + @property + def namespace(self): + return self._avro_name.namespace + + @property + def fullname(self): + return self._avro_name.fullname + + def name_ref(self, names): + """Reports this schema name relative to the specified name tracker. + + Args: + names: Avro name tracker to relativise this schema name against. + Returns: + This schema name, relativised against the specified name tracker. + """ + if self.namespace == names.default_namespace: + return self.name + return self.fullname + + @abc.abstractmethod + def to_json(self, names): + """Converts the schema object into its AVRO specification representation. + + Schema types that have names (records, enums, and fixed) must + be aware of not re-defining schemas that are already listed + in the parameter names. 
+ """ + raise Exception('Cannot run abstract method.') + +# ------------------------------------------------------------------------------ + + +_NO_DEFAULT = object() + + +class Field(object): + """Representation of the schema of a field in a record.""" + + def __init__( + self, + data_type, + name, + index, + has_default, + default=_NO_DEFAULT, + order=None, + doc=None, + other_props=None + ): + """Initializes a new Field object. + + Args: + data_type: Avro schema of the field. + name: Name of the field. + index: 0-based position of the field. + has_default: + default: + order: + doc: + other_props: + """ + if (not isinstance(name, _str)) or (not name): + raise SchemaParseException('Invalid record field name: %r.' % name) + if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS): + raise SchemaParseException('Invalid record field order: %r.' % order) + + # All properties of this record field: + self._props = {} + + self._has_default = has_default + if other_props: + self._props.update(other_props) + + self._index = index + self._type = self._props['type'] = data_type + self._name = self._props['name'] = name + + if has_default: + self._props['default'] = default + + if order is not None: + self._props['order'] = order + + if doc is not None: + self._props['doc'] = doc + + @property + def type(self): + """Returns: the schema of this field.""" + return self._type + + @property + def name(self): + """Returns: this field name.""" + return self._name + + @property + def index(self): + """Returns: the 0-based index of this field in the record.""" + return self._index + + @property + def default(self): + return self._props['default'] + + @property + def has_default(self): + return self._has_default + + @property + def order(self): + return self._props.get('order', None) + + @property + def doc(self): + return self._props.get('doc', None) + + @property + def props(self): + return self._props + + @property + def other_props(self): + return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS) + + def __str__(self): + return json.dumps(self.to_json()) + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + to_dump['type'] = self.type.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ +# Primitive Types + + +class PrimitiveSchema(Schema): + """Schema of a primitive Avro type. + + Valid primitive types are defined in PRIMITIVE_TYPES. + """ + + def __init__(self, data_type, other_props=None): + """Initializes a new schema object for the specified primitive type. + + Args: + data_type: Type of the schema to construct. Must be primitive. + """ + if data_type not in PRIMITIVE_TYPES: + raise AvroException('%r is not a valid primitive type.' % data_type) + super(PrimitiveSchema, self).__init__(data_type, other_props=other_props) + + @property + def name(self): + """Returns: the simple name of this schema.""" + # The name of a primitive type is the type itself. + return self.type + + @property + def fullname(self): + """Returns: the fully qualified name of this schema.""" + # The full name is the simple name for primitive schema. 
+ return self.name + + def to_json(self, names=None): + if len(self.props) == 1: + return self.fullname + return self.props + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ +# Complex Types (non-recursive) + + +class FixedSchema(NamedSchema): + def __init__( + self, + name, + namespace, + size, + names=None, + other_props=None, + ): + # Ensure valid ctor args + if not isinstance(size, int): + fail_msg = 'Fixed Schema requires a valid integer for size property.' + raise AvroException(fail_msg) + + super(FixedSchema, self).__init__( + data_type=FIXED, + name=name, + namespace=namespace, + names=names, + other_props=other_props, + ) + self._props['size'] = size + + @property + def size(self): + """Returns: the size of this fixed schema, in bytes.""" + return self._props['size'] + + def to_json(self, names=None): + if names is None: + names = Names() + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + return names.prune_namespace(self.props) + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ + + +class EnumSchema(NamedSchema): + def __init__( + self, + name, + namespace, + symbols, + names=None, + doc=None, + other_props=None, + ): + """Initializes a new enumeration schema object. + + Args: + name: Simple name of this enumeration. + namespace: Optional namespace. + symbols: Ordered list of symbols defined in this enumeration. + names: + doc: + other_props: + """ + symbols = tuple(symbols) + symbol_set = frozenset(symbols) + if (len(symbol_set) != len(symbols) + or not all(map(lambda symbol: isinstance(symbol, _str), symbols))): + raise AvroException( + 'Invalid symbols for enum schema: %r.' % (symbols,)) + + super(EnumSchema, self).__init__( + data_type=ENUM, + name=name, + namespace=namespace, + names=names, + other_props=other_props, + ) + + self._props['symbols'] = symbols + if doc is not None: + self._props['doc'] = doc + + @property + def symbols(self): + """Returns: the symbols defined in this enum.""" + return self._props['symbols'] + + def to_json(self, names=None): + if names is None: + names = Names() + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + return names.prune_namespace(self.props) + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ +# Complex Types (recursive) + + +class ArraySchema(Schema): + """Schema of an array.""" + + def __init__(self, items, other_props=None): + """Initializes a new array schema object. + + Args: + items: Avro schema of the array items. 
+ other_props: + """ + super(ArraySchema, self).__init__( + data_type=ARRAY, + other_props=other_props, + ) + self._items_schema = items + self._props['items'] = items + + @property + def items(self): + """Returns: the schema of the items in this array.""" + return self._items_schema + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + item_schema = self.items + to_dump['items'] = item_schema.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class MapSchema(Schema): + """Schema of a map.""" + + def __init__(self, values, other_props=None): + """Initializes a new map schema object. + + Args: + values: Avro schema of the map values. + other_props: + """ + super(MapSchema, self).__init__( + data_type=MAP, + other_props=other_props, + ) + self._values_schema = values + self._props['values'] = values + + @property + def values(self): + """Returns: the schema of the values in this map.""" + return self._values_schema + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + to_dump['values'] = self.values.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class UnionSchema(Schema): + """Schema of a union.""" + + def __init__(self, schemas): + """Initializes a new union schema object. + + Args: + schemas: Ordered collection of schema branches in the union. + """ + super(UnionSchema, self).__init__(data_type=UNION) + self._schemas = tuple(schemas) + + # Validate the schema branches: + + # All named schema names are unique: + named_branches = tuple( + filter(lambda schema: schema.type in NAMED_TYPES, self._schemas)) + unique_names = frozenset(map(lambda schema: schema.fullname, named_branches)) + if len(unique_names) != len(named_branches): + raise AvroException( + 'Invalid union branches with duplicate schema name:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + + # Types are unique within unnamed schemas, and union is not allowed: + unnamed_branches = tuple( + filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas)) + unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches)) + if UNION in unique_types: + raise AvroException( + 'Invalid union branches contain other unions:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + if len(unique_types) != len(unnamed_branches): + raise AvroException( + 'Invalid union branches with duplicate type:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + + @property + def schemas(self): + """Returns: the ordered list of schema branches in the union.""" + return self._schemas + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = [] + for schema in self.schemas: + to_dump.append(schema.to_json(names)) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class ErrorUnionSchema(UnionSchema): + """Schema representing the declared errors of a protocol message.""" + + def __init__(self, schemas): + """Initializes an error-union 
schema. + + Args: + schema: collection of error schema. + """ + # Prepend "string" to handle system errors + schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas) + super(ErrorUnionSchema, self).__init__(schemas=schemas) + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = [] + for schema in self.schemas: + # Don't print the system error schema + if schema.type == STRING: + continue + to_dump.append(schema.to_json(names)) + return to_dump + + +# ------------------------------------------------------------------------------ + + +class RecordSchema(NamedSchema): + """Schema of a record.""" + + @staticmethod + def _make_field(index, field_desc, names): + """Builds field schemas from a list of field JSON descriptors. + + Args: + index: 0-based index of the field in the record. + field_desc: JSON descriptors of a record field. + Return: + The field schema. + """ + field_schema = schema_from_json_data( + json_data=field_desc['type'], + names=names, + ) + other_props = ( + dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS))) + return Field( + data_type=field_schema, + name=field_desc['name'], + index=index, + has_default=('default' in field_desc), + default=field_desc.get('default', _NO_DEFAULT), + order=field_desc.get('order', None), + doc=field_desc.get('doc', None), + other_props=other_props, + ) + + @staticmethod + def make_field_list(field_desc_list, names): + """Builds field schemas from a list of field JSON descriptors. + + Guarantees field name unicity. + + Args: + field_desc_list: collection of field JSON descriptors. + names: Avro schema tracker. + Yields + Field schemas. + """ + for index, field_desc in enumerate(field_desc_list): + yield RecordSchema._make_field(index, field_desc, names) + + @staticmethod + def _make_field_map(fields): + """Builds the field map. + + Guarantees field name unicity. + + Args: + fields: iterable of field schema. + Returns: + A map of field schemas, indexed by name. + """ + field_map = {} + for field in fields: + if field.name in field_map: + raise SchemaParseException( + 'Duplicate record field name %r.' % field.name) + field_map[field.name] = field + return field_map + + def __init__( + self, + name, + namespace, + fields=None, + make_fields=None, + names=None, + record_type=RECORD, + doc=None, + other_props=None + ): + """Initializes a new record schema object. + + Args: + name: Name of the record (absolute or relative). + namespace: Optional namespace the record belongs to, if name is relative. + fields: collection of fields to add to this record. + Exactly one of fields or make_fields must be specified. + make_fields: function creating the fields that belong to the record. + The function signature is: make_fields(names) -> ordered field list. + Exactly one of fields or make_fields must be specified. + names: + record_type: Type of the record: one of RECORD, ERROR or REQUEST. + Protocol requests are not named. + doc: + other_props: + """ + if record_type == REQUEST: + # Protocol requests are not named: + super(RecordSchema, self).__init__( + data_type=REQUEST, + other_props=other_props, + ) + elif record_type in [RECORD, ERROR]: + # Register this record name in the tracker: + super(RecordSchema, self).__init__( + data_type=record_type, + name=name, + namespace=namespace, + names=names, + other_props=other_props, + ) + else: + raise SchemaParseException( + 'Invalid record type: %r.' 
% record_type) + + if record_type in [RECORD, ERROR]: + avro_name = names.get_name(name=name, namespace=namespace) + nested_names = names.new_with_default_namespace(namespace=avro_name.namespace) + elif record_type == REQUEST: + # Protocol request has no name: no need to change default namespace: + nested_names = names + + if fields is None: + fields = make_fields(names=nested_names) + else: + assert make_fields is None + self._fields = tuple(fields) + + self._field_map = RecordSchema._make_field_map(self._fields) + + self._props['fields'] = fields + if doc is not None: + self._props['doc'] = doc + + @property + def fields(self): + """Returns: the field schemas, as an ordered tuple.""" + return self._fields + + @property + def field_map(self): + """Returns: a read-only map of the field schemas index by field names.""" + return self._field_map + + def to_json(self, names=None): + if names is None: + names = Names() + # Request records don't have names + if self.type == REQUEST: + return [f.to_json(names) for f in self.fields] + + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + + to_dump = names.prune_namespace(self.props.copy()) + to_dump['fields'] = [f.to_json(names) for f in self.fields] + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ +# Module functions + + +def filter_keys_out(items, keys): + """Filters a collection of (key, value) items. + + Exclude any item whose key belongs to keys. + + Args: + items: Dictionary of items to filter the keys out of. + keys: Keys to filter out. + Yields: + Filtered items. + """ + for key, value in items.items(): + if key in keys: + continue + yield key, value + + +# ------------------------------------------------------------------------------ + + +def _schema_from_json_string(json_string, names): + if json_string in PRIMITIVE_TYPES: + return PrimitiveSchema(data_type=json_string) + + # Look for a known named schema: + schema = names.get_schema(name=json_string) + if schema is None: + raise SchemaParseException( + 'Unknown named schema %r, known names: %r.' 
+ % (json_string, sorted(names.names))) + return schema + + +def _schema_from_json_array(json_array, names): + def MakeSchema(desc): + return schema_from_json_data(json_data=desc, names=names) + + return UnionSchema(map(MakeSchema, json_array)) + + +def _schema_from_json_object(json_object, names): + data_type = json_object.get('type') + if data_type is None: + raise SchemaParseException( + 'Avro schema JSON descriptor has no "type" property: %r' % json_object) + + other_props = dict( + filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS)) + + if data_type in PRIMITIVE_TYPES: + # FIXME should not ignore other properties + result = PrimitiveSchema(data_type, other_props=other_props) + + elif data_type in NAMED_TYPES: + name = json_object.get('name') + namespace = json_object.get('namespace', names.default_namespace) + if data_type == FIXED: + size = json_object.get('size') + result = FixedSchema(name, namespace, size, names, other_props) + elif data_type == ENUM: + symbols = json_object.get('symbols') + doc = json_object.get('doc') + result = EnumSchema(name, namespace, symbols, names, doc, other_props) + + elif data_type in [RECORD, ERROR]: + field_desc_list = json_object.get('fields', ()) + + def MakeFields(names): + return tuple(RecordSchema.make_field_list(field_desc_list, names)) + + result = RecordSchema( + name=name, + namespace=namespace, + make_fields=MakeFields, + names=names, + record_type=data_type, + doc=json_object.get('doc'), + other_props=other_props, + ) + else: + raise Exception('Internal error: unknown type %r.' % data_type) + + elif data_type in VALID_TYPES: + # Unnamed, non-primitive Avro type: + + if data_type == ARRAY: + items_desc = json_object.get('items') + if items_desc is None: + raise SchemaParseException( + 'Invalid array schema descriptor with no "items" : %r.' + % json_object) + result = ArraySchema( + items=schema_from_json_data(items_desc, names), + other_props=other_props, + ) + + elif data_type == MAP: + values_desc = json_object.get('values') + if values_desc is None: + raise SchemaParseException( + 'Invalid map schema descriptor with no "values" : %r.' + % json_object) + result = MapSchema( + values=schema_from_json_data(values_desc, names=names), + other_props=other_props, + ) + + elif data_type == ERROR_UNION: + error_desc_list = json_object.get('declared_errors') + assert error_desc_list is not None + error_schemas = map( + lambda desc: schema_from_json_data(desc, names=names), + error_desc_list) + result = ErrorUnionSchema(schemas=error_schemas) + + else: + raise Exception('Internal error: unknown type %r.' % data_type) + else: + raise SchemaParseException( + 'Invalid JSON descriptor for an Avro schema: %r' % json_object) + return result + + +# Parsers for the JSON data types: +_JSONDataParserTypeMap = { + _str: _schema_from_json_string, + list: _schema_from_json_array, + dict: _schema_from_json_object, +} + + +def schema_from_json_data(json_data, names=None): + """Builds an Avro Schema from its JSON descriptor. + + Args: + json_data: JSON data representing the descriptor of the Avro schema. + names: Optional tracker for Avro named schemas. + Returns: + The Avro schema parsed from the JSON descriptor. + Raises: + SchemaParseException: if the descriptor is invalid. + """ + if names is None: + names = Names() + + # Select the appropriate parser based on the JSON data type: + parser = _JSONDataParserTypeMap.get(type(json_data)) + if parser is None: + raise SchemaParseException( + 'Invalid JSON descriptor for an Avro schema: %r.' 
% json_data) + return parser(json_data, names=names) + + +# ------------------------------------------------------------------------------ + + +def parse(json_string): + """Constructs a Schema from its JSON descriptor in text form. + + Args: + json_string: String representation of the JSON descriptor of the schema. + Returns: + The parsed schema. + Raises: + SchemaParseException: on JSON parsing error, + or if the JSON descriptor is invalid. + """ + try: + json_data = json.loads(json_string) + except Exception as exn: + raise SchemaParseException( + 'Error parsing schema from JSON: %r. ' + 'Error message: %r.' + % (json_string, exn)) + + # Initialize the names object + names = Names() + + # construct the Avro Schema object + return schema_from_json_data(json_data, names) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client.py new file mode 100644 index 0000000..a2efa21 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client.py @@ -0,0 +1,460 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import logging +import uuid +from typing import ( # pylint: disable=unused-import + Optional, + Any, + Tuple, +) + +try: + from urllib.parse import parse_qs, quote +except ImportError: + from urlparse import parse_qs # type: ignore + from urllib2 import quote # type: ignore + +import six + +from azure.core.configuration import Configuration +from azure.core.credentials import AzureSasCredential +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import RequestsTransport, HttpTransport +from azure.core.pipeline.policies import ( + RedirectPolicy, + ContentDecodePolicy, + BearerTokenCredentialPolicy, + ProxyPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + UserAgentPolicy, + AzureSasCredentialPolicy +) + +from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .models import LocationMode +from .authentication import SharedKeyCredentialPolicy +from .shared_access_signature import QueryStringConstants +from .request_handlers import serialize_batch_body, _get_batch_request_delimiter +from .policies import ( + StorageHeadersPolicy, + StorageContentValidation, + StorageRequestHook, + StorageResponseHook, + StorageLoggingPolicy, + StorageHosts, + QueueMessagePolicy, + ExponentialRetry, +) +from .._version import VERSION +from .response_handlers import process_storage_error, PartialBatchErrorException + + +_LOGGER = logging.getLogger(__name__) +_SERVICE_PARAMS = { + "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, + "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, + "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, + "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, +} + + +class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + parsed_url, # type: Any + service, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None + self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) + self._hosts = kwargs.get("_hosts") + self.scheme = parsed_url.scheme + + if service not in ["blob", "queue", "file-share", "dfs"]: + raise ValueError("Invalid service: {}".format(service)) + service_name = service.split('-')[0] + account = parsed_url.netloc.split(".{}.core.".format(service_name)) + + self.account_name = account[0] if len(account) > 1 else None + if not self.account_name and parsed_url.netloc.startswith("localhost") \ + or parsed_url.netloc.startswith("127.0.0.1"): + self.account_name = parsed_url.path.strip("/") + + self.credential = _format_shared_key_credential(self.account_name, credential) + if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): + raise ValueError("Token credential is only supported with HTTPS.") + + secondary_hostname = None + if hasattr(self.credential, "account_name"): + self.account_name = self.credential.account_name + secondary_hostname = "{}-secondary.{}.{}".format( + self.credential.account_name, service_name, SERVICE_HOST_BASE) + + if not self._hosts: + if len(account) > 1: + secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") + if kwargs.get("secondary_hostname"): + secondary_hostname = kwargs["secondary_hostname"] + primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') + self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} + + self.require_encryption = kwargs.get("require_encryption", False) + self.key_encryption_key = kwargs.get("key_encryption_key") + self.key_resolver_function = kwargs.get("key_resolver_function") + self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) + + def __enter__(self): + self._client.__enter__() + return self + + def __exit__(self, *args): + self._client.__exit__(*args) + + def close(self): + """ This method is to close the sockets opened by the client. + It need not be used when using with a context manager. + """ + self._client.close() + + @property + def url(self): + """The full endpoint URL to this entity, including SAS token if used. + + This could be either the primary endpoint, + or the secondary endpoint depending on the current :func:`location_mode`. + """ + return self._format_url(self._hosts[self._location_mode]) + + @property + def primary_endpoint(self): + """The full primary endpoint URL. + + :type: str + """ + return self._format_url(self._hosts[LocationMode.PRIMARY]) + + @property + def primary_hostname(self): + """The hostname of the primary endpoint. + + :type: str + """ + return self._hosts[LocationMode.PRIMARY] + + @property + def secondary_endpoint(self): + """The full secondary endpoint URL if configured. + + If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. + + :type: str + :raise ValueError: + """ + if not self._hosts[LocationMode.SECONDARY]: + raise ValueError("No secondary host configured.") + return self._format_url(self._hosts[LocationMode.SECONDARY]) + + @property + def secondary_hostname(self): + """The hostname of the secondary endpoint. + + If not available this will be None. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. 
+ + :type: str or None + """ + return self._hosts[LocationMode.SECONDARY] + + @property + def location_mode(self): + """The location mode that the client is currently using. + + By default this will be "primary". Options include "primary" and "secondary". + + :type: str + """ + + return self._location_mode + + @location_mode.setter + def location_mode(self, value): + if self._hosts.get(value): + self._location_mode = value + self._client._config.url = self.url # pylint: disable=protected-access + else: + raise ValueError("No host URL for location mode: {}".format(value)) + + @property + def api_version(self): + """The version of the Storage API used for requests. + + :type: str + """ + return self._client._config.version # pylint: disable=protected-access + + def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): + query_str = "?" + if snapshot: + query_str += "snapshot={}&".format(self.snapshot) + if share_snapshot: + query_str += "sharesnapshot={}&".format(self.snapshot) + if sas_token and isinstance(credential, AzureSasCredential): + raise ValueError( + "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") + if sas_token and not credential: + query_str += sas_token + elif is_credential_sastoken(credential): + query_str += credential.lstrip("?") + credential = None + return query_str.rstrip("?&"), credential + + def _create_pipeline(self, credential, **kwargs): + # type: (Any, **Any) -> Tuple[Configuration, Pipeline] + self._credential_policy = None + if hasattr(credential, "get_token"): + self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) + elif isinstance(credential, SharedKeyCredentialPolicy): + self._credential_policy = credential + elif isinstance(credential, AzureSasCredential): + self._credential_policy = AzureSasCredentialPolicy(credential) + elif credential is not None: + raise TypeError("Unsupported credential: {}".format(credential)) + + config = kwargs.get("_configuration") or create_configuration(**kwargs) + if kwargs.get("_pipeline"): + return config, kwargs["_pipeline"] + config.transport = kwargs.get("transport") # type: ignore + kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) + kwargs.setdefault("read_timeout", READ_TIMEOUT) + if not config.transport: + config.transport = RequestsTransport(**kwargs) + policies = [ + QueueMessagePolicy(), + config.proxy_policy, + config.user_agent_policy, + StorageContentValidation(), + ContentDecodePolicy(response_encoding="utf-8"), + RedirectPolicy(**kwargs), + StorageHosts(hosts=self._hosts, **kwargs), + config.retry_policy, + config.headers_policy, + StorageRequestHook(**kwargs), + self._credential_policy, + config.logging_policy, + StorageResponseHook(**kwargs), + DistributedTracingPolicy(**kwargs), + HttpLoggingPolicy(**kwargs) + ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") + return config, Pipeline(config.transport, policies=policies) + + def _batch_send( + self, + *reqs, # type: HttpRequest + **kwargs + ): + """Given a series of request, do a Storage batch call. 
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + batch_id = str(uuid.uuid1()) + + request = self._client._client.post( # pylint: disable=protected-access + url='{}://{}/{}?{}comp=batch{}{}'.format( + self.scheme, + self.primary_hostname, + kwargs.pop('path', ""), + kwargs.pop('restype', ""), + kwargs.pop('sas', ""), + kwargs.pop('timeout', "") + ), + headers={ + 'x-ms-version': self.api_version, + "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) + } + ) + + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + + request.set_multipart_mixed( + *reqs, + policies=policies, + enforce_https=False + ) + + Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access + body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) + request.set_bytes_body(body) + + temp = request.multipart_mixed_info + request.multipart_mixed_info = None + pipeline_response = self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + request.multipart_mixed_info = temp + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() + if raise_on_any_failure: + parts = list(response.parts()) + if any(p for p in parts if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts + ) + raise error + return iter(parts) + return parts + except HttpResponseError as error: + process_storage_error(error) + +class TransportWrapper(HttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. 
+ """ + def __init__(self, transport): + self._transport = transport + + def send(self, request, **kwargs): + return self._transport.send(request, **kwargs) + + def open(self): + pass + + def close(self): + pass + + def __enter__(self): + pass + + def __exit__(self, *args): # pylint: disable=arguments-differ + pass + + +def _format_shared_key_credential(account_name, credential): + if isinstance(credential, six.string_types): + if not account_name: + raise ValueError("Unable to determine account name for shared key credential.") + credential = {"account_name": account_name, "account_key": credential} + if isinstance(credential, dict): + if "account_name" not in credential: + raise ValueError("Shared key credential missing 'account_name") + if "account_key" not in credential: + raise ValueError("Shared key credential missing 'account_key") + return SharedKeyCredentialPolicy(**credential) + return credential + + +def parse_connection_str(conn_str, credential, service): + conn_str = conn_str.rstrip(";") + conn_settings = [s.split("=", 1) for s in conn_str.split(";")] + if any(len(tup) != 2 for tup in conn_settings): + raise ValueError("Connection string is either blank or malformed.") + conn_settings = dict((key.upper(), val) for key, val in conn_settings) + endpoints = _SERVICE_PARAMS[service] + primary = None + secondary = None + if not credential: + try: + credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} + except KeyError: + credential = conn_settings.get("SHAREDACCESSSIGNATURE") + if endpoints["primary"] in conn_settings: + primary = conn_settings[endpoints["primary"]] + if endpoints["secondary"] in conn_settings: + secondary = conn_settings[endpoints["secondary"]] + else: + if endpoints["secondary"] in conn_settings: + raise ValueError("Connection string specifies only secondary endpoint.") + try: + primary = "{}://{}.{}.{}".format( + conn_settings["DEFAULTENDPOINTSPROTOCOL"], + conn_settings["ACCOUNTNAME"], + service, + conn_settings["ENDPOINTSUFFIX"], + ) + secondary = "{}-secondary.{}.{}".format( + conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] + ) + except KeyError: + pass + + if not primary: + try: + primary = "https://{}.{}.{}".format( + conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) + ) + except KeyError: + raise ValueError("Connection string missing required connection details.") + return primary, secondary, credential + + +def create_configuration(**kwargs): + # type: (**Any) -> Configuration + config = Configuration(**kwargs) + config.headers_policy = StorageHeadersPolicy(**kwargs) + config.user_agent_policy = UserAgentPolicy( + sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) + config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.logging_policy = StorageLoggingPolicy(**kwargs) + config.proxy_policy = ProxyPolicy(**kwargs) + + # Storage settings + config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) + config.copy_polling_interval = 15 + + # Block blob uploads + config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) + config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) + config.use_byte_buffer = kwargs.get("use_byte_buffer", False) + + # Page blob uploads + config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) + + # Datalake file uploads + 
config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) + + # Blob downloads + config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) + config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) + + # File uploads + config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) + return config + + +def parse_query(query_str): + sas_values = QueryStringConstants.to_list() + parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} + sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] + sas_token = None + if sas_params: + sas_token = "&".join(sas_params) + + snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") + return snapshot, sas_token + + +def is_credential_sastoken(credential): + if not credential or not isinstance(credential, six.string_types): + return False + + sas_values = QueryStringConstants.to_list() + parsed_query = parse_qs(credential.lstrip("?")) + if parsed_query and all([k in sas_values for k in parsed_query.keys()]): + return True + return False diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client_async.py new file mode 100644 index 0000000..3e619c9 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/base_client_async.py @@ -0,0 +1,192 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging + +from azure.core.credentials import AzureSasCredential +from azure.core.pipeline import AsyncPipeline +from azure.core.async_paging import AsyncList +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline.policies import ( + ContentDecodePolicy, + AsyncBearerTokenCredentialPolicy, + AsyncRedirectPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + AzureSasCredentialPolicy, +) +from azure.core.pipeline.transport import AsyncHttpTransport + +from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .authentication import SharedKeyCredentialPolicy +from .base_client import create_configuration +from .policies import ( + StorageContentValidation, + StorageRequestHook, + StorageHosts, + StorageHeadersPolicy, + QueueMessagePolicy +) +from .policies_async import AsyncStorageResponseHook + +from .response_handlers import process_storage_error, PartialBatchErrorException + +if TYPE_CHECKING: + from azure.core.pipeline import Pipeline + from azure.core.pipeline.transport import HttpRequest + from azure.core.configuration import Configuration +_LOGGER = logging.getLogger(__name__) + + +class AsyncStorageAccountHostsMixin(object): + + def __enter__(self): + raise TypeError("Async client only supports 'async with'.") + + def __exit__(self, *args): + pass + + async def __aenter__(self): + await self._client.__aenter__() + return self + + async def __aexit__(self, *args): + await self._client.__aexit__(*args) + + async def close(self): + """ This method is to close the sockets opened by the client. 
+ It need not be used when using with a context manager. + """ + await self._client.close() + + def _create_pipeline(self, credential, **kwargs): + # type: (Any, **Any) -> Tuple[Configuration, Pipeline] + self._credential_policy = None + if hasattr(credential, 'get_token'): + self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) + elif isinstance(credential, SharedKeyCredentialPolicy): + self._credential_policy = credential + elif isinstance(credential, AzureSasCredential): + self._credential_policy = AzureSasCredentialPolicy(credential) + elif credential is not None: + raise TypeError("Unsupported credential: {}".format(credential)) + config = kwargs.get('_configuration') or create_configuration(**kwargs) + if kwargs.get('_pipeline'): + return config, kwargs['_pipeline'] + config.transport = kwargs.get('transport') # type: ignore + kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) + kwargs.setdefault("read_timeout", READ_TIMEOUT) + if not config.transport: + try: + from azure.core.pipeline.transport import AioHttpTransport + except ImportError: + raise ImportError("Unable to create async transport. Please check aiohttp is installed.") + config.transport = AioHttpTransport(**kwargs) + policies = [ + QueueMessagePolicy(), + config.headers_policy, + config.proxy_policy, + config.user_agent_policy, + StorageContentValidation(), + StorageRequestHook(**kwargs), + self._credential_policy, + ContentDecodePolicy(response_encoding="utf-8"), + AsyncRedirectPolicy(**kwargs), + StorageHosts(hosts=self._hosts, **kwargs), # type: ignore + config.retry_policy, + config.logging_policy, + AsyncStorageResponseHook(**kwargs), + DistributedTracingPolicy(**kwargs), + HttpLoggingPolicy(**kwargs), + ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") + return config, AsyncPipeline(config.transport, policies=policies) + + async def _batch_send( + self, + *reqs, # type: HttpRequest + **kwargs + ): + """Given a series of request, do a Storage batch call. 
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + request = self._client._client.post( # pylint: disable=protected-access + url='{}://{}/{}?{}comp=batch{}{}'.format( + self.scheme, + self.primary_hostname, + kwargs.pop('path', ""), + kwargs.pop('restype', ""), + kwargs.pop('sas', ""), + kwargs.pop('timeout', "") + ), + headers={ + 'x-ms-version': self.api_version + } + ) + + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + + request.set_multipart_mixed( + *reqs, + policies=policies, + enforce_https=False + ) + + pipeline_response = await self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() # Return an AsyncIterator + if raise_on_any_failure: + parts_list = [] + async for part in parts: + parts_list.append(part) + if any(p for p in parts_list if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts_list + ) + raise error + return AsyncList(parts_list) + return parts + except HttpResponseError as error: + process_storage_error(error) + + +class AsyncTransportWrapper(AsyncHttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. + """ + def __init__(self, async_transport): + self._transport = async_transport + + async def send(self, request, **kwargs): + return await self._transport.send(request, **kwargs) + + async def open(self): + pass + + async def close(self): + pass + + async def __aenter__(self): + pass + + async def __aexit__(self, *args): # pylint: disable=arguments-differ + pass diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/constants.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/constants.py new file mode 100644 index 0000000..bdee829 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/constants.py @@ -0,0 +1,27 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys +from .._generated import AzureBlobStorage + + +X_MS_VERSION = AzureBlobStorage(url="get_api_version")._config.version # pylint: disable=protected-access + +# Socket timeout in seconds +CONNECTION_TIMEOUT = 20 +READ_TIMEOUT = 20 + +# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) +# The socket timeout is now the maximum total duration to send all data. 
+if sys.version_info >= (3, 5): + # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds + # the 80000 seconds was calculated with: + # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) + READ_TIMEOUT = 80000 + +STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" + +SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/encryption.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/encryption.py new file mode 100644 index 0000000..62607cc --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/encryption.py @@ -0,0 +1,542 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os +from os import urandom +from json import ( + dumps, + loads, +) +from collections import OrderedDict + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import CBC +from cryptography.hazmat.primitives.padding import PKCS7 + +from azure.core.exceptions import HttpResponseError + +from .._version import VERSION +from . import encode_base64, decode_base64_to_bytes + + +_ENCRYPTION_PROTOCOL_V1 = '1.0' +_ERROR_OBJECT_INVALID = \ + '{0} does not define a complete interface. Value of {1} is either missing or invalid.' + + +def _validate_not_none(param_name, param): + if param is None: + raise ValueError('{0} should not be None.'.format(param_name)) + + +def _validate_key_encryption_key_wrap(kek): + # Note that None is not callable and so will fail the second clause of each check. + if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) + if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) + + +class _EncryptionAlgorithm(object): + ''' + Specifies which client encryption algorithm is used. + ''' + AES_CBC_256 = 'AES_CBC_256' + + +class _WrappedContentKey: + ''' + Represents the envelope key details stored on the service. + ''' + + def __init__(self, algorithm, encrypted_key, key_id): + ''' + :param str algorithm: + The algorithm used for wrapping. + :param bytes encrypted_key: + The encrypted content-encryption-key. + :param str key_id: + The key-encryption-key identifier string. + ''' + + _validate_not_none('algorithm', algorithm) + _validate_not_none('encrypted_key', encrypted_key) + _validate_not_none('key_id', key_id) + + self.algorithm = algorithm + self.encrypted_key = encrypted_key + self.key_id = key_id + + +class _EncryptionAgent: + ''' + Represents the encryption agent stored on the service. + It consists of the encryption protocol version and encryption algorithm used. + ''' + + def __init__(self, encryption_algorithm, protocol): + ''' + :param _EncryptionAlgorithm encryption_algorithm: + The algorithm used for encrypting the message contents. 
+ :param str protocol: + The protocol version used for encryption. + ''' + + _validate_not_none('encryption_algorithm', encryption_algorithm) + _validate_not_none('protocol', protocol) + + self.encryption_algorithm = str(encryption_algorithm) + self.protocol = protocol + + +class _EncryptionData: + ''' + Represents the encryption data that is stored on the service. + ''' + + def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, + key_wrapping_metadata): + ''' + :param bytes content_encryption_IV: + The content encryption initialization vector. + :param _EncryptionAgent encryption_agent: + The encryption agent. + :param _WrappedContentKey wrapped_content_key: + An object that stores the wrapping algorithm, the key identifier, + and the encrypted key bytes. + :param dict key_wrapping_metadata: + A dict containing metadata related to the key wrapping. + ''' + + _validate_not_none('content_encryption_IV', content_encryption_IV) + _validate_not_none('encryption_agent', encryption_agent) + _validate_not_none('wrapped_content_key', wrapped_content_key) + + self.content_encryption_IV = content_encryption_IV + self.encryption_agent = encryption_agent + self.wrapped_content_key = wrapped_content_key + self.key_wrapping_metadata = key_wrapping_metadata + + +def _generate_encryption_data_dict(kek, cek, iv): + ''' + Generates and returns the encryption metadata as a dict. + + :param object kek: The key encryption key. See calling functions for more information. + :param bytes cek: The content encryption key. + :param bytes iv: The initialization vector. + :return: A dict containing all the encryption metadata. + :rtype: dict + ''' + # Encrypt the cek. + wrapped_cek = kek.wrap_key(cek) + + # Build the encryption_data dict. + # Use OrderedDict to comply with Java's ordering requirement. + wrapped_content_key = OrderedDict() + wrapped_content_key['KeyId'] = kek.get_kid() + wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) + wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() + + encryption_agent = OrderedDict() + encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 + encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 + + encryption_data_dict = OrderedDict() + encryption_data_dict['WrappedContentKey'] = wrapped_content_key + encryption_data_dict['EncryptionAgent'] = encryption_agent + encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) + encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} + + return encryption_data_dict + + +def _dict_to_encryption_data(encryption_data_dict): + ''' + Converts the specified dictionary to an EncryptionData object for + eventual use in decryption. + + :param dict encryption_data_dict: + The dictionary containing the encryption data. + :return: an _EncryptionData object built from the dictionary. 
+ :rtype: _EncryptionData + ''' + try: + if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: + raise ValueError("Unsupported encryption version.") + except KeyError: + raise ValueError("Unsupported encryption version.") + wrapped_content_key = encryption_data_dict['WrappedContentKey'] + wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], + decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), + wrapped_content_key['KeyId']) + + encryption_agent = encryption_data_dict['EncryptionAgent'] + encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], + encryption_agent['Protocol']) + + if 'KeyWrappingMetadata' in encryption_data_dict: + key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] + else: + key_wrapping_metadata = None + + encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), + encryption_agent, + wrapped_content_key, + key_wrapping_metadata) + + return encryption_data + + +def _generate_AES_CBC_cipher(cek, iv): + ''' + Generates and returns an encryption cipher for AES CBC using the given cek and iv. + + :param bytes[] cek: The content encryption key for the cipher. + :param bytes[] iv: The initialization vector for the cipher. + :return: A cipher for encrypting in AES256 CBC. + :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher + ''' + + backend = default_backend() + algorithm = AES(cek) + mode = CBC(iv) + return Cipher(algorithm, mode, backend) + + +def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): + ''' + Extracts and returns the content_encryption_key stored in the encryption_data object + and performs necessary validation on all parameters. + :param _EncryptionData encryption_data: + The encryption metadata of the retrieved value. + :param obj key_encryption_key: + The key_encryption_key used to unwrap the cek. Please refer to high-level service object + instance variables for more details. + :param func key_resolver: + A function used that, given a key_id, will return a key_encryption_key. Please refer + to high-level service object instance variables for more details. + :return: the content_encryption_key stored in the encryption_data object. + :rtype: bytes[] + ''' + + _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV) + _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key) + + if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol: + raise ValueError('Encryption version is not supported.') + + content_encryption_key = None + + # If the resolver exists, give priority to the key it finds. + if key_resolver is not None: + key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id) + + _validate_not_none('key_encryption_key', key_encryption_key) + if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) + if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid(): + raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.') + # Will throw an exception if the specified algorithm is not supported. 
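The checks above only require a duck-typed key-encryption-key exposing `get_kid` and `unwrap_key` (plus `wrap_key`/`get_key_wrap_algorithm` on the write path). A toy in-memory KEK satisfying that interface, purely illustrative and not a secure design, could look like:

```python
# Toy key-encryption-key matching the duck-typed interface validated above.
# XOR "wrapping" is for illustration only; a real KEK should delegate to
# Key Vault or an HSM.
class LocalKek:
    def __init__(self, kid, secret):
        self._kid = kid
        self._secret = secret  # 32 random bytes, same length as the CEK

    def wrap_key(self, key):
        return bytes(b ^ s for b, s in zip(key, self._secret))

    def unwrap_key(self, wrapped_key, algorithm):
        if algorithm != self.get_key_wrap_algorithm():
            raise ValueError('Unsupported key wrap algorithm.')
        return bytes(b ^ s for b, s in zip(wrapped_key, self._secret))

    def get_kid(self):
        return self._kid

    def get_key_wrap_algorithm(self):
        return 'XOR_DEMO'
```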
+ content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+ encryption_data.wrapped_content_key.algorithm)
+ _validate_not_none('content_encryption_key', content_encryption_key)
+
+ return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+ '''
+ Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
+ Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+ Returns the original plaintext.
+
+ :param str message:
+ The ciphertext to be decrypted.
+ :param _EncryptionData encryption_data:
+ The metadata associated with this ciphertext.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ unwrap_key(key, algorithm)
+ - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+ get_kid()
+ - returns a string key id for this key-encryption-key.
+ :param function resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The decrypted plaintext.
+ :rtype: str
+ '''
+ _validate_not_none('message', message)
+ content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+ if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
+ raise ValueError('Specified encryption algorithm is not supported.')
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+ # decrypt data
+ decrypted_data = message
+ decryptor = cipher.decryptor()
+ decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
+
+ # unpad data
+ unpadder = PKCS7(128).unpadder()
+ decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+ return decrypted_data
+
+
+def encrypt_blob(blob, key_encryption_key):
+ '''
+ Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
+ Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+ Returns a json-formatted string containing the encryption metadata. This method should
+ only be used when a blob is small enough for single shot upload. Encrypting larger blobs
+ is done as a part of the upload_data_chunks method.
+
+ :param bytes blob:
+ The blob to be encrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+ :rtype: (str, bytes)
+ '''
+
+ _validate_not_none('blob', blob)
+ _validate_not_none('key_encryption_key', key_encryption_key)
+ _validate_key_encryption_key_wrap(key_encryption_key)
+
+ # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+ content_encryption_key = urandom(32)
+ initialization_vector = urandom(16)
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+ # PKCS7 with 16 byte blocks ensures compatibility with AES.
+ padder = PKCS7(128).padder()
+ padded_data = padder.update(blob) + padder.finalize()
+
+ # Encrypt the data.
+ encryptor = cipher.encryptor()
+ encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+ encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+ initialization_vector)
+ encryption_data['EncryptionMode'] = 'FullBlob'
+
+ return dumps(encryption_data), encrypted_data
+
+
+def generate_blob_encryption_data(key_encryption_key):
+ '''
+ Generates the encryption_metadata for the blob.
+
+ :param bytes key_encryption_key:
+ The key-encryption-key used to wrap the cek associated with this blob.
+ :return: A tuple containing the cek and iv for this blob as well as the
+ serialized encryption metadata for the blob.
+ :rtype: (bytes, bytes, str)
+ '''
+ encryption_data = None
+ content_encryption_key = None
+ initialization_vector = None
+ if key_encryption_key:
+ _validate_key_encryption_key_wrap(key_encryption_key)
+ content_encryption_key = urandom(32)
+ initialization_vector = urandom(16)
+ encryption_data = _generate_encryption_data_dict(key_encryption_key,
+ content_encryption_key,
+ initialization_vector)
+ encryption_data['EncryptionMode'] = 'FullBlob'
+ encryption_data = dumps(encryption_data)
+
+ return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
+ content, start_offset, end_offset, response_headers):
+ '''
+ Decrypts the given blob contents and returns only the requested range.
+
+ :param bool require_encryption:
+ Whether or not the calling blob service requires objects to be decrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :param key_resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The decrypted blob content.
+ :rtype: bytes
+ '''
+ try:
+ encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+ except: # pylint: disable=bare-except
+ if require_encryption:
+ raise ValueError(
+ 'Encryption required, but received data does not contain appropriate metadata.'
+ \
+ 'Data was either not encrypted or metadata has been lost.')
+
+ return content
+
+ if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
+ raise ValueError('Specified encryption algorithm is not supported.')
+
+ blob_type = response_headers['x-ms-blob-type']
+
+ iv = None
+ unpad = False
+ if 'content-range' in response_headers:
+ content_range = response_headers['content-range']
+ # Format: 'bytes x-y/size'
+
+ # Ignore the word 'bytes'
+ content_range = content_range.split(' ')
+
+ content_range = content_range[1].split('-')
+ content_range = content_range[1].split('/')
+ end_range = int(content_range[0])
+ blob_size = int(content_range[1])
+
+ if start_offset >= 16:
+ iv = content[:16]
+ content = content[16:]
+ start_offset -= 16
+ else:
+ iv = encryption_data.content_encryption_IV
+
+ if end_range == blob_size - 1:
+ unpad = True
+ else:
+ unpad = True
+ iv = encryption_data.content_encryption_IV
+
+ if blob_type == 'PageBlob':
+ unpad = False
+
+ content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+ decryptor = cipher.decryptor()
+
+ content = decryptor.update(content) + decryptor.finalize()
+ if unpad:
+ unpadder = PKCS7(128).unpadder()
+ content = unpadder.update(content) + unpadder.finalize()
+
+ return content[start_offset: len(content) - end_offset]
+
+
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+ encryptor = None
+ padder = None
+
+ if cek is not None and iv is not None:
+ cipher = _generate_AES_CBC_cipher(cek, iv)
+ encryptor = cipher.encryptor()
+ padder = PKCS7(128).padder() if should_pad else None
+
+ return encryptor, padder
+
+
+def encrypt_queue_message(message, key_encryption_key):
+ '''
+ Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
+ Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+ Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+ :param object message:
+ The plain text message to be encrypted.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+ get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+ get_kid()--returns a string key id for this key-encryption-key.
+ :return: A json-formatted string containing the encrypted message and the encryption metadata.
+ :rtype: str
+ '''
+
+ _validate_not_none('message', message)
+ _validate_not_none('key_encryption_key', key_encryption_key)
+ _validate_key_encryption_key_wrap(key_encryption_key)
+
+ # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+ content_encryption_key = os.urandom(32)
+ initialization_vector = os.urandom(16)
+
+ # Queue encoding functions all return unicode strings, and encryption should
+ # operate on binary strings.
+ message = message.encode('utf-8')
+
+ cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+ # PKCS7 with 16 byte blocks ensures compatibility with AES.
+ padder = PKCS7(128).padder()
+ padded_data = padder.update(message) + padder.finalize()
+
+ # Encrypt the data.
+ encryptor = cipher.encryptor()
+ encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+ # Build the dictionary structure.
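+ # Illustrative shape of the resulting JSON (values abridged; this mirrors
+ # the dict assembled just below, it is not an additional format):
+ #
+ # {
+ # "EncryptedMessageContents": "<base64 ciphertext>",
+ # "EncryptionData": {
+ # "WrappedContentKey": {"KeyId": "...", "EncryptedKey": "<base64>", "Algorithm": "..."},
+ # "EncryptionAgent": {"Protocol": "<version>", "EncryptionAlgorithm": "AES_CBC_256"},
+ # "ContentEncryptionIV": "<base64>",
+ # "KeyWrappingMetadata": {"EncryptionLibrary": "Python <version>"}
+ # }
+ # }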
+ queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+ 'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+ content_encryption_key,
+ initialization_vector)}
+
+ return dumps(queue_message)
+
+
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+ '''
+ Returns the decrypted message contents from an EncryptedQueueMessage.
+ If no encryption metadata is present, will return the unaltered message.
+ :param str message:
+ The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+ :param bool require_encryption:
+ If set, will enforce that the retrieved messages are encrypted and decrypt them.
+ :param object key_encryption_key:
+ The user-provided key-encryption-key. Must implement the following methods:
+ unwrap_key(key, algorithm)
+ - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+ get_kid()
+ - returns a string key id for this key-encryption-key.
+ :param function resolver(kid):
+ The user-provided key resolver. Uses the kid string to return a key-encryption-key
+ implementing the interface defined above.
+ :return: The plain text message from the queue message.
+ :rtype: str
+ '''
+
+ try:
+ message = loads(message)
+
+ encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+ decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+ except (KeyError, ValueError):
+ # Message was not json formatted and so was not encrypted
+ # or the user provided a json formatted message.
+ if require_encryption:
+ raise ValueError('Message was not encrypted.')
+
+ return message
+ try:
+ return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+ except Exception as error:
+ raise HttpResponseError(
+ message="Decryption failed.",
+ response=response,
+ error=error)
diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/models.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/models.py
new file mode 100644
index 0000000..6f6052a
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/models.py
@@ -0,0 +1,473 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-instance-attributes +from enum import Enum + + +def get_enum_value(value): + if value is None or value in ["None", ""]: + return None + try: + return value.value + except AttributeError: + return value + + +class StorageErrorCode(str, Enum): + + # Generic storage values + account_already_exists = "AccountAlreadyExists" + account_being_created = "AccountBeingCreated" + account_is_disabled = "AccountIsDisabled" + authentication_failed = "AuthenticationFailed" + authorization_failure = "AuthorizationFailure" + no_authentication_information = "NoAuthenticationInformation" + condition_headers_not_supported = "ConditionHeadersNotSupported" + condition_not_met = "ConditionNotMet" + empty_metadata_key = "EmptyMetadataKey" + insufficient_account_permissions = "InsufficientAccountPermissions" + internal_error = "InternalError" + invalid_authentication_info = "InvalidAuthenticationInfo" + invalid_header_value = "InvalidHeaderValue" + invalid_http_verb = "InvalidHttpVerb" + invalid_input = "InvalidInput" + invalid_md5 = "InvalidMd5" + invalid_metadata = "InvalidMetadata" + invalid_query_parameter_value = "InvalidQueryParameterValue" + invalid_range = "InvalidRange" + invalid_resource_name = "InvalidResourceName" + invalid_uri = "InvalidUri" + invalid_xml_document = "InvalidXmlDocument" + invalid_xml_node_value = "InvalidXmlNodeValue" + md5_mismatch = "Md5Mismatch" + metadata_too_large = "MetadataTooLarge" + missing_content_length_header = "MissingContentLengthHeader" + missing_required_query_parameter = "MissingRequiredQueryParameter" + missing_required_header = "MissingRequiredHeader" + missing_required_xml_node = "MissingRequiredXmlNode" + multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" + operation_timed_out = "OperationTimedOut" + out_of_range_input = "OutOfRangeInput" + out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" + request_body_too_large = "RequestBodyTooLarge" + resource_type_mismatch = "ResourceTypeMismatch" + request_url_failed_to_parse = "RequestUrlFailedToParse" + resource_already_exists = "ResourceAlreadyExists" + resource_not_found = "ResourceNotFound" + server_busy = "ServerBusy" + unsupported_header = "UnsupportedHeader" + unsupported_xml_node = "UnsupportedXmlNode" + unsupported_query_parameter = "UnsupportedQueryParameter" + unsupported_http_verb = "UnsupportedHttpVerb" + + # Blob values + append_position_condition_not_met = "AppendPositionConditionNotMet" + blob_already_exists = "BlobAlreadyExists" + blob_not_found = "BlobNotFound" + blob_overwritten = "BlobOverwritten" + blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" + block_count_exceeds_limit = "BlockCountExceedsLimit" + block_list_too_long = "BlockListTooLong" + cannot_change_to_lower_tier = "CannotChangeToLowerTier" + cannot_verify_copy_source = "CannotVerifyCopySource" + container_already_exists = "ContainerAlreadyExists" + container_being_deleted = "ContainerBeingDeleted" + container_disabled = "ContainerDisabled" + container_not_found = "ContainerNotFound" + content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" + copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" + copy_id_mismatch = "CopyIdMismatch" + feature_version_mismatch = "FeatureVersionMismatch" + incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" + incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" + incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" + infinite_lease_duration_required = "InfiniteLeaseDurationRequired" + invalid_blob_or_block = "InvalidBlobOrBlock" + invalid_blob_tier = "InvalidBlobTier" + invalid_blob_type = "InvalidBlobType" + invalid_block_id = "InvalidBlockId" + invalid_block_list = "InvalidBlockList" + invalid_operation = "InvalidOperation" + invalid_page_range = "InvalidPageRange" + invalid_source_blob_type = "InvalidSourceBlobType" + invalid_source_blob_url = "InvalidSourceBlobUrl" + invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" + lease_already_present = "LeaseAlreadyPresent" + lease_already_broken = "LeaseAlreadyBroken" + lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" + lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" + lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" + lease_id_missing = "LeaseIdMissing" + lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" + lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" + lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" + lease_lost = "LeaseLost" + lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" + lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" + lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" + max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" + no_pending_copy_operation = "NoPendingCopyOperation" + operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" + pending_copy_operation = "PendingCopyOperation" + previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" + previous_snapshot_not_found = "PreviousSnapshotNotFound" + previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" + sequence_number_condition_not_met = "SequenceNumberConditionNotMet" + sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" + snapshot_count_exceeded = "SnapshotCountExceeded" + snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" + snapshots_present = "SnapshotsPresent" + source_condition_not_met = "SourceConditionNotMet" + system_in_use = "SystemInUse" + target_condition_not_met = "TargetConditionNotMet" + unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" + blob_being_rehydrated = "BlobBeingRehydrated" + blob_archived = "BlobArchived" + blob_not_archived = "BlobNotArchived" + + # Queue values + invalid_marker = "InvalidMarker" + message_not_found = "MessageNotFound" + message_too_large = "MessageTooLarge" + pop_receipt_mismatch = "PopReceiptMismatch" + queue_already_exists = "QueueAlreadyExists" + queue_being_deleted = "QueueBeingDeleted" + queue_disabled = "QueueDisabled" + queue_not_empty = "QueueNotEmpty" + queue_not_found = "QueueNotFound" + + # File values + cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" + client_cache_flush_delay = "ClientCacheFlushDelay" + delete_pending = "DeletePending" + directory_not_empty = "DirectoryNotEmpty" + file_lock_conflict = "FileLockConflict" + invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" + parent_not_found = "ParentNotFound" + read_only_attribute = "ReadOnlyAttribute" + share_already_exists = "ShareAlreadyExists" + share_being_deleted = "ShareBeingDeleted" + 
share_disabled = "ShareDisabled" + share_not_found = "ShareNotFound" + sharing_violation = "SharingViolation" + share_snapshot_in_progress = "ShareSnapshotInProgress" + share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" + share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" + share_has_snapshots = "ShareHasSnapshots" + container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" + + # DataLake values + content_length_must_be_zero = 'ContentLengthMustBeZero' + path_already_exists = 'PathAlreadyExists' + invalid_flush_position = 'InvalidFlushPosition' + invalid_property_name = 'InvalidPropertyName' + invalid_source_uri = 'InvalidSourceUri' + unsupported_rest_version = 'UnsupportedRestVersion' + file_system_not_found = 'FilesystemNotFound' + path_not_found = 'PathNotFound' + rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' + source_path_not_found = 'SourcePathNotFound' + destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' + file_system_already_exists = 'FilesystemAlreadyExists' + file_system_being_deleted = 'FilesystemBeingDeleted' + invalid_destination_path = 'InvalidDestinationPath' + invalid_rename_source_path = 'InvalidRenameSourcePath' + invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' + lease_is_already_broken = 'LeaseIsAlreadyBroken' + lease_name_mismatch = 'LeaseNameMismatch' + path_conflict = 'PathConflict' + source_path_is_being_deleted = 'SourcePathIsBeingDeleted' + + +class DictMixin(object): + + def __setitem__(self, key, item): + self.__dict__[key] = item + + def __getitem__(self, key): + return self.__dict__[key] + + def __repr__(self): + return str(self) + + def __len__(self): + return len(self.keys()) + + def __delitem__(self, key): + self.__dict__[key] = None + + def __eq__(self, other): + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self): + return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) + + def has_key(self, k): + return k in self.__dict__ + + def update(self, *args, **kwargs): + return self.__dict__.update(*args, **kwargs) + + def keys(self): + return [k for k in self.__dict__ if not k.startswith('_')] + + def values(self): + return [v for k, v in self.__dict__.items() if not k.startswith('_')] + + def items(self): + return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] + + def get(self, key, default=None): + if key in self.__dict__: + return self.__dict__[key] + return default + + +class LocationMode(object): + """ + Specifies the location the request should be sent to. This mode only applies + for RA-GRS accounts which allow secondary read access. All other account types + must use PRIMARY. + """ + + PRIMARY = 'primary' #: Requests should be sent to the primary location. + SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. + + +class ResourceTypes(object): + """ + Specifies the resource types that are accessible with the account SAS. 
+
+ :param bool service:
+ Access to service-level APIs (e.g., Get/Set Service Properties,
+ Get Service Stats, List Containers/Queues/Shares)
+ :param bool container:
+ Access to container-level APIs (e.g., Create/Delete Container,
+ Create/Delete Queue, Create/Delete Share,
+ List Blobs/Files and Directories)
+ :param bool object:
+ Access to object-level APIs for blobs, queue messages, and
+ files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+ """
+
+ def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin
+ self.service = service
+ self.container = container
+ self.object = object
+ self._str = (('s' if self.service else '') +
+ ('c' if self.container else '') +
+ ('o' if self.object else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, string):
+ """Create a ResourceTypes from a string.
+
+ To specify service, container, or object you need only to
+ include the first letter of the word in the string. E.g. for service and container,
+ you would provide a string "sc".
+
+ :param str string: Specify service, container, or object in
+ the string with the first letter of the word.
+ :return: A ResourceTypes object
+ :rtype: ~azure.storage.blob.ResourceTypes
+ """
+ res_service = 's' in string
+ res_container = 'c' in string
+ res_object = 'o' in string
+
+ parsed = cls(res_service, res_container, res_object)
+ parsed._str = string # pylint: disable = protected-access
+ return parsed
+
+
+class AccountSasPermissions(object):
+ """
+ :class:`~AccountSasPermissions` class to be used with the generate_account_sas
+ function and for the AccessPolicies used with set_*_acl. There are two types of
+ SAS which may be used to grant resource access. One is to grant access to a
+ specific resource (resource-specific). Another is to grant access to the
+ entire service for a specific account and allow certain operations based on
+ the permissions found here.
+
+ :param bool read:
+ Valid for all signed resource types (Service, Container, and Object).
+ Permits read permissions to the specified resource type.
+ :param bool write:
+ Valid for all signed resource types (Service, Container, and Object).
+ Permits write permissions to the specified resource type.
+ :param bool delete:
+ Valid for Container and Object resource types, except for queue messages.
+ :param bool delete_previous_version:
+ Delete the previous blob version for the versioning enabled storage account.
+ :param bool list:
+ Valid for Service and Container resource types only.
+ :param bool add:
+ Valid for the following Object resource types only: queue messages, and append blobs.
+ :param bool create:
+ Valid for the following Object resource types only: blobs and files.
+ Users can create new blobs or files, but may not overwrite existing
+ blobs or files.
+ :param bool update:
+ Valid for the following Object resource types only: queue messages.
+ :param bool process:
+ Valid for the following Object resource type only: queue messages.
+ :keyword bool tag:
+ To enable set or get tags on the blobs in the container.
+ :keyword bool filter_by_tags:
+ To enable get blobs by tags, this should be used together with list permission.
+ :keyword bool set_immutability_policy:
+ To enable operations related to set/delete immutability policy.
+ To get immutability policy, you just need read permission.
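+
+ Example (illustrative; the permission string depends on the flags set):
+
+ .. code-block:: python
+
+ sas_permissions = AccountSasPermissions(read=True, list=True)
+ str(sas_permissions) # -> 'rl'
+ AccountSasPermissions.from_string('rl') # round-trips the same flags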
+ """ + def __init__(self, read=False, write=False, delete=False, + list=False, # pylint: disable=redefined-builtin + add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): + self.read = read + self.write = write + self.delete = delete + self.delete_previous_version = delete_previous_version + self.list = list + self.add = add + self.create = create + self.update = update + self.process = process + self.tag = kwargs.pop('tag', False) + self.filter_by_tags = kwargs.pop('filter_by_tags', False) + self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('l' if self.list else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('u' if self.update else '') + + ('p' if self.process else '') + + ('f' if self.filter_by_tags else '') + + ('t' if self.tag else '') + + ('i' if self.set_immutability_policy else '') + ) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create AccountSasPermissions from a string. + + To specify read, write, delete, etc. permissions you need only to + include the first letter of the word in the string. E.g. for read and write + permissions you would provide a string "rw". + + :param str permission: Specify permissions in + the string with the first letter of the word. + :return: An AccountSasPermissions object + :rtype: ~azure.storage.blob.AccountSasPermissions + """ + p_read = 'r' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_delete_previous_version = 'x' in permission + p_list = 'l' in permission + p_add = 'a' in permission + p_create = 'c' in permission + p_update = 'u' in permission + p_process = 'p' in permission + p_tag = 't' in permission + p_filter_by_tags = 'f' in permission + p_set_immutability_policy = 'i' in permission + parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, + list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, + filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy) + + return parsed + + +class Services(object): + """Specifies the services accessible with the account SAS. + + :param bool blob: + Access for the `~azure.storage.blob.BlobServiceClient` + :param bool queue: + Access for the `~azure.storage.queue.QueueServiceClient` + :param bool fileshare: + Access for the `~azure.storage.fileshare.ShareServiceClient` + """ + + def __init__(self, blob=False, queue=False, fileshare=False): + self.blob = blob + self.queue = queue + self.fileshare = fileshare + self._str = (('b' if self.blob else '') + + ('q' if self.queue else '') + + ('f' if self.fileshare else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, string): + """Create Services from a string. + + To specify blob, queue, or file you need only to + include the first letter of the word in the string. E.g. for blob and queue + you would provide a string "bq". + + :param str string: Specify blob, queue, or file in + in the string with the first letter of the word. 
+ :return: A Services object
+ :rtype: ~azure.storage.blob.Services
+ """
+ res_blob = 'b' in string
+ res_queue = 'q' in string
+ res_file = 'f' in string
+
+ parsed = cls(res_blob, res_queue, res_file)
+ parsed._str = string # pylint: disable = protected-access
+ return parsed
+
+
+class UserDelegationKey(object):
+ """
+ Represents a user delegation key, provided to the user by Azure Storage
+ based on their Azure Active Directory access token.
+
+ The fields are saved as simple strings since the user does not have to interact with this object;
+ to generate an identity SAS, the user can simply pass it to the right API.
+
+ :ivar str signed_oid:
+ Object ID of this token.
+ :ivar str signed_tid:
+ Tenant ID of the tenant that issued this token.
+ :ivar str signed_start:
+ The datetime this token becomes valid.
+ :ivar str signed_expiry:
+ The datetime this token expires.
+ :ivar str signed_service:
+ What service this key is valid for.
+ :ivar str signed_version:
+ The version identifier of the REST service that created this token.
+ :ivar str value:
+ The user delegation key.
+ """
+ def __init__(self):
+ self.signed_oid = None
+ self.signed_tid = None
+ self.signed_start = None
+ self.signed_expiry = None
+ self.signed_service = None
+ self.signed_version = None
+ self.value = None
diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/parser.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/parser.py
new file mode 100644
index 0000000..c6feba8
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/parser.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+if sys.version_info < (3,):
+ def _str(value):
+ if isinstance(value, unicode): # pylint: disable=undefined-variable
+ return value.encode('utf-8')
+
+ return str(value)
+else:
+ _str = str
+
+
+def _to_utc_datetime(value):
+ return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies.py
new file mode 100644
index 0000000..11fc984
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies.py
@@ -0,0 +1,608 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import re
+import random
+from time import time
+from io import SEEK_SET, UnsupportedOperation
+import logging
+import uuid
+import types
+from typing import Any, TYPE_CHECKING
+from wsgiref.handlers import format_date_time
+try:
+ from urllib.parse import (
+ urlparse,
+ parse_qsl,
+ urlunparse,
+ urlencode,
+ )
+except ImportError:
+ from urllib import urlencode # type: ignore
+ from urlparse import ( # type: ignore
+ urlparse,
+ parse_qsl,
+ urlunparse,
+ )
+
+from azure.core.pipeline.policies import (
+ HeadersPolicy,
+ SansIOHTTPPolicy,
+ NetworkTraceLoggingPolicy,
+ HTTPPolicy,
+ RequestHistory
+)
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+
+from .models import LocationMode
+
+try:
+ _unicode_type = unicode # type: ignore
+except NameError:
+ _unicode_type = str
+
+if TYPE_CHECKING:
+ from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+ if isinstance(data, _unicode_type):
+ data = data.encode('utf-8')
+ encoded = base64.b64encode(data)
+ return encoded.decode('utf-8')
+
+
+def is_exhausted(settings):
+ """Are we out of retries?"""
+ retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+ return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+ if settings['hook']:
+ settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+def is_retry(response, mode):
+ """Is this method/status code retryable? (Based on whitelists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is on the list of status codes to
+ be retried upon in the presence of the aforementioned header)
+ """
+ status = response.http_response.status_code
+ if 300 <= status < 500:
+ # An exception occurred, but in most cases it was expected. Examples could
+ # include a 409 Conflict or 412 Precondition Failed.
+ if status == 404 and mode == LocationMode.SECONDARY:
+ # Response code 404 should be retried if secondary was used.
+ return True
+ if status == 408:
+ # Response code 408 is a timeout and should be retried.
+ return True
+ return False
+ if status >= 500:
+ # Response codes above 500 with the exception of 501 Not Implemented and
+ # 505 Version Not Supported indicate a server issue and should be retried.
+ if status in [501, 505]: + return False + return True + return False + + +def urljoin(base_url, stub_url): + parsed = urlparse(base_url) + parsed = parsed._replace(path=parsed.path + '/' + stub_url) + return parsed.geturl() + + +class QueueMessagePolicy(SansIOHTTPPolicy): + + def on_request(self, request): + message_id = request.context.options.pop('queue_message_id', None) + if message_id: + request.http_request.url = urljoin( + request.http_request.url, + message_id) + + +class StorageHeadersPolicy(HeadersPolicy): + request_id_header_name = 'x-ms-client-request-id' + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + super(StorageHeadersPolicy, self).on_request(request) + current_time = format_date_time(time()) + request.http_request.headers['x-ms-date'] = current_time + + custom_id = request.context.options.pop('client_request_id', None) + request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) + + # def on_response(self, request, response): + # # raise exception if the echoed client request id from the service is not identical to the one we sent + # if self.request_id_header_name in response.http_response.headers: + + # client_request_id = request.http_request.headers.get(self.request_id_header_name) + + # if response.http_response.headers[self.request_id_header_name] != client_request_id: + # raise AzureError( + # "Echoed client request ID: {} does not match sent client request ID: {}. " + # "Service request ID: {}".format( + # response.http_response.headers[self.request_id_header_name], client_request_id, + # response.http_response.headers['x-ms-request-id']), + # response=response.http_response + # ) + + +class StorageHosts(SansIOHTTPPolicy): + + def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument + self.hosts = hosts + super(StorageHosts, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + request.context.options['hosts'] = self.hosts + parsed_url = urlparse(request.http_request.url) + + # Detect what location mode we're currently requesting with + location_mode = LocationMode.PRIMARY + for key, value in self.hosts.items(): + if parsed_url.netloc == value: + location_mode = key + + # See if a specific location mode has been specified, and if so, redirect + use_location = request.context.options.pop('use_location', None) + if use_location: + # Lock retries to the specific location + request.context.options['retry_to_secondary'] = False + if use_location not in self.hosts: + raise ValueError("Attempting to use undefined host location {}".format(use_location)) + if use_location != location_mode: + # Update request URL to use the specified location + updated = parsed_url._replace(netloc=self.hosts[use_location]) + request.http_request.url = updated.geturl() + location_mode = use_location + + request.context.options['location_mode'] = location_mode + + +class StorageLoggingPolicy(NetworkTraceLoggingPolicy): + """A policy that logs HTTP request and response to the DEBUG logger. 
+ + This accepts both global configuration, and per-request level with "enable_http_logger" + """ + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + http_request = request.http_request + options = request.context.options + if options.pop("logging_enable", self.enable_http_logger): + request.context["logging_enable"] = True + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + log_url = http_request.url + query_params = http_request.query + if 'sig' in query_params: + log_url = log_url.replace(query_params['sig'], "sig=*****") + _LOGGER.debug("Request URL: %r", log_url) + _LOGGER.debug("Request method: %r", http_request.method) + _LOGGER.debug("Request headers:") + for header, value in http_request.headers.items(): + if header.lower() == 'authorization': + value = '*****' + elif header.lower() == 'x-ms-copy-source' and 'sig' in value: + # take the url apart and scrub away the signed signature + scheme, netloc, path, params, query, fragment = urlparse(value) + parsed_qs = dict(parse_qsl(query)) + parsed_qs['sig'] = '*****' + + # the SAS needs to be put back together + value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) + + _LOGGER.debug(" %r: %r", header, value) + _LOGGER.debug("Request body:") + + # We don't want to log the binary data of a file upload. + if isinstance(http_request.body, types.GeneratorType): + _LOGGER.debug("File upload") + else: + _LOGGER.debug(str(http_request.body)) + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log request: %r", err) + + def on_response(self, request, response): + # type: (PipelineRequest, PipelineResponse, Any) -> None + if response.context.pop("logging_enable", self.enable_http_logger): + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + _LOGGER.debug("Response status: %r", response.http_response.status_code) + _LOGGER.debug("Response headers:") + for res_header, value in response.http_response.headers.items(): + _LOGGER.debug(" %r: %r", res_header, value) + + # We don't want to log binary data if the response is a file. 
+ _LOGGER.debug("Response content:")
+ pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
+ header = response.http_response.headers.get('content-disposition')
+
+ if header and pattern.match(header):
+ filename = header.partition('=')[2]
+ _LOGGER.debug("File attachments: %s", filename)
+ elif response.http_response.headers.get("content-type", "").endswith("octet-stream"):
+ _LOGGER.debug("Body contains binary data.")
+ elif response.http_response.headers.get("content-type", "").startswith("image"):
+ _LOGGER.debug("Body contains image data.")
+ else:
+ if response.context.options.get('stream', False):
+ _LOGGER.debug("Body is streamable")
+ else:
+ _LOGGER.debug(response.http_response.text())
+ except Exception as err: # pylint: disable=broad-except
+ _LOGGER.debug("Failed to log response: %s", repr(err))
+
+
+class StorageRequestHook(SansIOHTTPPolicy):
+
+ def __init__(self, **kwargs): # pylint: disable=unused-argument
+ self._request_callback = kwargs.get('raw_request_hook')
+ super(StorageRequestHook, self).__init__()
+
+ def on_request(self, request):
+ # type: (PipelineRequest, **Any) -> PipelineResponse
+ request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
+ if request_callback:
+ request_callback(request)
+
+
+class StorageResponseHook(HTTPPolicy):
+
+ def __init__(self, **kwargs): # pylint: disable=unused-argument
+ self._response_callback = kwargs.get('raw_response_hook')
+ super(StorageResponseHook, self).__init__()
+
+ def send(self, request):
+ # type: (PipelineRequest) -> PipelineResponse
+ data_stream_total = request.context.get('data_stream_total') or \
+ request.context.options.pop('data_stream_total', None)
+ download_stream_current = request.context.get('download_stream_current') or \
+ request.context.options.pop('download_stream_current', None)
+ upload_stream_current = request.context.get('upload_stream_current') or \
+ request.context.options.pop('upload_stream_current', None)
+ response_callback = request.context.get('response_callback') or \
+ request.context.options.pop('raw_response_hook', self._response_callback)
+
+ response = self.next.send(request)
+ will_retry = is_retry(response, request.context.options.get('mode'))
+ if not will_retry and download_stream_current is not None:
+ download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+ if data_stream_total is None:
+ content_range = response.http_response.headers.get('Content-Range')
+ if content_range:
+ data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+ else:
+ data_stream_total = download_stream_current
+ elif not will_retry and upload_stream_current is not None:
+ upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+ for pipeline_obj in [request, response]:
+ pipeline_obj.context['data_stream_total'] = data_stream_total
+ pipeline_obj.context['download_stream_current'] = download_stream_current
+ pipeline_obj.context['upload_stream_current'] = upload_stream_current
+ if response_callback:
+ response_callback(response)
+ request.context['response_callback'] = response_callback
+ return response
+
+
+class StorageContentValidation(SansIOHTTPPolicy):
+ """A policy that computes and attaches a Content-MD5 hash to qualifying
+ requests, and validates the MD5 hash of response content against the
+ service-provided 'content-md5' header.
+
+ Validation is applied only when the 'validate_content' option is enabled.
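+
+ For example (illustrative), a call such as
+ ``blob_client.upload_blob(data, validate_content=True)`` causes this policy
+ to attach a Content-MD5 header computed from the request body; on download,
+ the service's 'content-md5' response header is checked against a locally
+ computed hash and an AzureError is raised on mismatch.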
+ """ + header_name = 'Content-MD5' + + def __init__(self, **kwargs): # pylint: disable=unused-argument + super(StorageContentValidation, self).__init__() + + @staticmethod + def get_content_md5(data): + md5 = hashlib.md5() # nosec + if isinstance(data, bytes): + md5.update(data) + elif hasattr(data, 'read'): + pos = 0 + try: + pos = data.tell() + except: # pylint: disable=bare-except + pass + for chunk in iter(lambda: data.read(4096), b""): + md5.update(chunk) + try: + data.seek(pos, SEEK_SET) + except (AttributeError, IOError): + raise ValueError("Data should be bytes or a seekable file-like object.") + else: + raise ValueError("Data should be bytes or a seekable file-like object.") + + return md5.digest() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + validate_content = request.context.options.pop('validate_content', False) + if validate_content and request.http_request.method != 'GET': + computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) + request.http_request.headers[self.header_name] = computed_md5 + request.context['validate_content_md5'] = computed_md5 + request.context['validate_content'] = validate_content + + def on_response(self, request, response): + if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): + computed_md5 = request.context.get('validate_content_md5') or \ + encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) + if response.http_response.headers['content-md5'] != computed_md5: + raise AzureError( + 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( + response.http_response.headers['content-md5'], computed_md5), + response=response.http_response + ) + + +class StorageRetryPolicy(HTTPPolicy): + """ + The base class for Exponential and Linear retries containing shared code. + """ + + def __init__(self, **kwargs): + self.total_retries = kwargs.pop('retry_total', 10) + self.connect_retries = kwargs.pop('retry_connect', 3) + self.read_retries = kwargs.pop('retry_read', 3) + self.status_retries = kwargs.pop('retry_status', 3) + self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) + super(StorageRetryPolicy, self).__init__() + + def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use + """ + A function which sets the next host location on the request, if applicable. + + :param ~azure.storage.models.RetryContext context: + The retry context containing the previous host location and the request + to evaluate and possibly modify. 
+ """ + if settings['hosts'] and all(settings['hosts'].values()): + url = urlparse(request.url) + # If there's more than one possible location, retry to the alternative + if settings['mode'] == LocationMode.PRIMARY: + settings['mode'] = LocationMode.SECONDARY + else: + settings['mode'] = LocationMode.PRIMARY + updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) + request.url = updated.geturl() + + def configure_retries(self, request): # pylint: disable=no-self-use + body_position = None + if hasattr(request.http_request.body, 'read'): + try: + body_position = request.http_request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + options = request.context.options + return { + 'total': options.pop("retry_total", self.total_retries), + 'connect': options.pop("retry_connect", self.connect_retries), + 'read': options.pop("retry_read", self.read_retries), + 'status': options.pop("retry_status", self.status_retries), + 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), + 'mode': options.pop("location_mode", LocationMode.PRIMARY), + 'hosts': options.pop("hosts", None), + 'hook': options.pop("retry_hook", None), + 'body_position': body_position, + 'count': 0, + 'history': [] + } + + def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use + """ Formula for computing the current backoff. + Should be calculated by child class. + + :rtype: float + """ + return 0 + + def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + + def increment(self, settings, request, response=None, error=None): + """Increment the retry counters. + + :param response: A pipeline response object. + :param error: An error encountered during the request, or + None if the response was received successfully. + + :return: Whether the retry attempts are exhausted. + """ + settings['total'] -= 1 + + if error and isinstance(error, ServiceRequestError): + # Errors when we're fairly sure that the server did not receive the + # request, so it should be safe to retry. + settings['connect'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + elif error and isinstance(error, ServiceResponseError): + # Errors that occur after the request has been started, so we should + # assume that the server began processing it. 
+ settings['read'] -= 1
+ settings['history'].append(RequestHistory(request, error=error))
+
+ else:
+ # Incrementing because of a server error like a 500 in
+ # status_forcelist and the given method is in the whitelist
+ if response:
+ settings['status'] -= 1
+ settings['history'].append(RequestHistory(request, http_response=response))
+
+ if not is_exhausted(settings):
+ if request.method not in ['PUT'] and settings['retry_secondary']:
+ self._set_next_host_location(settings, request)
+
+ # rewind the request body if it is a stream
+ if request.body and hasattr(request.body, 'read'):
+ # no position was saved, then retry would not work
+ if settings['body_position'] is None:
+ return False
+ try:
+ # attempt to rewind the body to the initial position
+ request.body.seek(settings['body_position'], SEEK_SET)
+ except (UnsupportedOperation, ValueError):
+ # if body is not seekable, then retry would not work
+ return False
+ settings['count'] += 1
+ return True
+ return False
+
+ def send(self, request):
+ retries_remaining = True
+ response = None
+ retry_settings = self.configure_retries(request)
+ while retries_remaining:
+ try:
+ response = self.next.send(request)
+ if is_retry(response, retry_settings['mode']):
+ retries_remaining = self.increment(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response)
+ if retries_remaining:
+ retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=response.http_response,
+ error=None)
+ self.sleep(retry_settings, request.context.transport)
+ continue
+ break
+ except AzureError as err:
+ retries_remaining = self.increment(
+ retry_settings, request=request.http_request, error=err)
+ if retries_remaining:
+ retry_hook(
+ retry_settings,
+ request=request.http_request,
+ response=None,
+ error=err)
+ self.sleep(retry_settings, request.context.transport)
+ continue
+ raise err
+ if retry_settings['history']:
+ response.context['history'] = retry_settings['history']
+ response.http_response.location_mode = retry_settings['mode']
+ return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+ """Exponential retry."""
+
+ def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+ retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ '''
+ Constructs an Exponential retry object. The initial_backoff is used for
+ the first retry. Subsequent retries are retried after initial_backoff +
+ increment_base^retry_count seconds. For example, by default the first retry
+ occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+ third after (15+3^2) = 24 seconds.
+
+ :param int initial_backoff:
+ The initial backoff interval, in seconds, for the first retry.
+ :param int increment_base:
+ The base, in seconds, to increment the initial_backoff by after the
+ first retry.
+ :param int retry_total:
+ The maximum number of retry attempts.
+ :param bool retry_to_secondary:
+ Whether the request should be retried to secondary, if able. This should
+ only be enabled if RA-GRS accounts are used and potentially stale data
+ can be handled.
+ :param int random_jitter_range:
+ A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+ For example, a random_jitter_range of 3 results in the back-off interval x varying between x-3 and x+3.
+ '''
+ self.initial_backoff = initial_backoff
+ self.increment_base = increment_base
+ self.random_jitter_range = random_jitter_range
+ super(ExponentialRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+ An integer indicating how long to wait before retrying the request,
+ or None to indicate no retry should be performed.
+ :rtype: int or None
+ """
+ random_generator = random.Random()
+ backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+ random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+ random_range_end = backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(StorageRetryPolicy):
+ """Linear retry."""
+
+ def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+ """
+ Constructs a Linear retry object.
+
+ :param int backoff:
+ The backoff interval, in seconds, between retries.
+ :param int retry_total:
+ The maximum number of retry attempts.
+ :param bool retry_to_secondary:
+ Whether the request should be retried to secondary, if able. This should
+ only be enabled if RA-GRS accounts are used and potentially stale data
+ can be handled.
+ :param int random_jitter_range:
+ A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+ For example, a random_jitter_range of 3 results in the back-off interval x varying between x-3 and x+3.
+ """
+ self.backoff = backoff
+ self.random_jitter_range = random_jitter_range
+ super(LinearRetry, self).__init__(
+ retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+ def get_backoff_time(self, settings):
+ """
+ Calculates how long to sleep before retrying.
+
+ :return:
+ An integer indicating how long to wait before retrying the request,
+ or None to indicate no retry should be performed.
+ :rtype: int or None
+ """
+ random_generator = random.Random()
+ # the backoff interval normally does not change, however there is the possibility
+ # that it was modified by accessing the property directly after initializing the object
+ random_range_start = self.backoff - self.random_jitter_range \
+ if self.backoff > self.random_jitter_range else 0
+ random_range_end = self.backoff + self.random_jitter_range
+ return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies_async.py
new file mode 100644
index 0000000..e0926b8
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/policies_async.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import asyncio +import random +import logging +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline.policies import AsyncHTTPPolicy +from azure.core.exceptions import AzureError + +from .policies import is_retry, StorageRetryPolicy + +if TYPE_CHECKING: + from azure.core.pipeline import PipelineRequest, PipelineResponse + + +_LOGGER = logging.getLogger(__name__) + + +async def retry_hook(settings, **kwargs): + if settings['hook']: + if asyncio.iscoroutine(settings['hook']): + await settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + else: + settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + + +class AsyncStorageResponseHook(AsyncHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(AsyncStorageResponseHook, self).__init__() + + async def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = await self.next.send(request) + await response.http_response.load_body() + + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + if asyncio.iscoroutine(response_callback): + await response_callback(response) + else: + response_callback(response) + request.context['response_callback'] = response_callback + return response + +class AsyncStorageRetryPolicy(StorageRetryPolicy): + """ + The base class for Exponential and Linear retries containing shared code. 
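+
+ Unlike the synchronous StorageRetryPolicy it extends, ``sleep`` and ``send``
+ are coroutines here, and a user-supplied retry hook may itself be a
+ coroutine (see ``retry_hook`` above).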
+    """
+
+    async def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        await transport.sleep(backoff)
+
+    async def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = await self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        await retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        await self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    await retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    await self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(AsyncStorageRetryPolicy):
+    """Exponential retry."""
+
+    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        '''
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/request_handlers.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/request_handlers.py
new file mode 100644
index 0000000..0d3a2a5
--- /dev/null
+++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/request_handlers.py
@@ -0,0 +1,278 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+
+import logging
+from os import fstat
+import stat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+
+import isodate
+
+from azure.core.exceptions import raise_with_traceback
+
+
+_LOGGER = logging.getLogger(__name__)
+
+_REQUEST_DELIMITER_PREFIX = "batch_"
+_HTTP1_1_IDENTIFIER = "HTTP/1.1"
+_HTTP_LINE_ENDING = "\r\n"
+
+
+def serialize_iso(attr):
+    """Serialize Datetime object into ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if format invalid.
+ """ + if not attr: + return None + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, + utc.tm_hour, utc.tm_min, utc.tm_sec) + return date + 'Z' + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(ValueError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + +def get_length(data): + length = None + # Check if object implements the __len__ method, covers most input cases such as bytearray. + try: + length = len(data) + except: # pylint: disable=bare-except + pass + + if not length: + # Check if the stream is a file-like stream object. + # If so, calculate the size using the file descriptor. + try: + fileno = data.fileno() + except (AttributeError, UnsupportedOperation): + pass + else: + try: + mode = fstat(fileno).st_mode + if stat.S_ISREG(mode) or stat.S_ISLNK(mode): + #st_size only meaningful if regular file or symlink, other types + # e.g. sockets may return misleading sizes like 0 + return fstat(fileno).st_size + except OSError: + # Not a valid fileno, may be possible requests returned + # a socket number? + pass + + # If the stream is seekable and tell() is implemented, calculate the stream size. + try: + current_position = data.tell() + data.seek(0, SEEK_END) + length = data.tell() - current_position + data.seek(current_position, SEEK_SET) + except (AttributeError, UnsupportedOperation): + pass + + return length + + +def read_length(data): + try: + if hasattr(data, 'read'): + read_data = b'' + for chunk in iter(lambda: data.read(4096), b""): + read_data += chunk + return len(read_data), read_data + if hasattr(data, '__iter__'): + read_data = b'' + for chunk in data: + read_data += chunk + return len(read_data), read_data + except: # pylint: disable=bare-except + pass + raise ValueError("Unable to calculate content length, please specify.") + + +def validate_and_format_range_headers( + start_range, end_range, start_range_required=True, + end_range_required=True, check_content_md5=False, align_to_page=False): + # If end range is provided, start range must be provided + if (start_range_required or end_range is not None) and start_range is None: + raise ValueError("start_range value cannot be None.") + if end_range_required and end_range is None: + raise ValueError("end_range value cannot be None.") + + # Page ranges must be 512 aligned + if align_to_page: + if start_range is not None and start_range % 512 != 0: + raise ValueError("Invalid page blob start_range: {0}. " + "The size must be aligned to a 512-byte boundary.".format(start_range)) + if end_range is not None and end_range % 512 != 511: + raise ValueError("Invalid page blob end_range: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(end_range)) + + # Format based on whether end_range is present + range_header = None + if end_range is not None: + range_header = 'bytes={0}-{1}'.format(start_range, end_range) + elif start_range is not None: + range_header = "bytes={0}-".format(start_range) + + # Content MD5 can only be provided for a complete range less than 4MB in size + range_validation = None + if check_content_md5: + if start_range is None or end_range is None: + raise ValueError("Both start and end range requied for MD5 content validation.") + if end_range - start_range > 4 * 1024 * 1024: + raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") + range_validation = 'true' + + return range_header, range_validation + + +def add_metadata_headers(metadata=None): + # type: (Optional[Dict[str, str]]) -> Dict[str, str] + headers = {} + if metadata: + for key, value in metadata.items(): + headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value + return headers + + +def serialize_batch_body(requests, batch_id): + """ + -- + + -- + (repeated as needed) + ---- + + Serializes the requests in this batch to a single HTTP mixed/multipart body. + + :param list[~azure.core.pipeline.transport.HttpRequest] requests: + a list of sub-request for the batch request + :param str batch_id: + to be embedded in batch sub-request delimiter + :return: The body bytes for this batch. + """ + + if requests is None or len(requests) == 0: + raise ValueError('Please provide sub-request(s) for this batch request') + + delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') + newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') + batch_body = list() + + content_index = 0 + for request in requests: + request.headers.update({ + "Content-ID": str(content_index), + "Content-Length": str(0) + }) + batch_body.append(delimiter_bytes) + batch_body.append(_make_body_from_sub_request(request)) + batch_body.append(newline_bytes) + content_index += 1 + + batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) + # final line of body MUST have \r\n at the end, or it will not be properly read by the service + batch_body.append(newline_bytes) + + return bytes().join(batch_body) + + +def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): + """ + Gets the delimiter used for this batch request's mixed/multipart HTTP format. + + :param str batch_id: + Randomly generated id + :param bool is_prepend_dashes: + Whether to include the starting dashes. Used in the body, but non on defining the delimiter. + :param bool is_append_dashes: + Whether to include the ending dashes. Used in the body on the closing delimiter only. + :return: The delimiter, WITHOUT a trailing newline. + """ + + prepend_dashes = '--' if is_prepend_dashes else '' + append_dashes = '--' if is_append_dashes else '' + + return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes + + +def _make_body_from_sub_request(sub_request): + """ + Content-Type: application/http + Content-ID: + Content-Transfer-Encoding: (if present) + + HTTP/ +
:
(repeated as necessary) + Content-Length: + (newline if content length > 0) + (if content length > 0) + + Serializes an http request. + + :param ~azure.core.pipeline.transport.HttpRequest sub_request: + Request to serialize. + :return: The serialized sub-request in bytes + """ + + # put the sub-request's headers into a list for efficient str concatenation + sub_request_body = list() + + # get headers for ease of manipulation; remove headers as they are used + headers = sub_request.headers + + # append opening headers + sub_request_body.append("Content-Type: application/http") + sub_request_body.append(_HTTP_LINE_ENDING) + + sub_request_body.append("Content-ID: ") + sub_request_body.append(headers.pop("Content-ID", "")) + sub_request_body.append(_HTTP_LINE_ENDING) + + sub_request_body.append("Content-Transfer-Encoding: binary") + sub_request_body.append(_HTTP_LINE_ENDING) + + # append blank line + sub_request_body.append(_HTTP_LINE_ENDING) + + # append HTTP verb and path and query and HTTP version + sub_request_body.append(sub_request.method) + sub_request_body.append(' ') + sub_request_body.append(sub_request.url) + sub_request_body.append(' ') + sub_request_body.append(_HTTP1_1_IDENTIFIER) + sub_request_body.append(_HTTP_LINE_ENDING) + + # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) + for header_name, header_value in headers.items(): + if header_value is not None: + sub_request_body.append(header_name) + sub_request_body.append(": ") + sub_request_body.append(header_value) + sub_request_body.append(_HTTP_LINE_ENDING) + + # append blank line + sub_request_body.append(_HTTP_LINE_ENDING) + + return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/response_handlers.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/response_handlers.py new file mode 100644 index 0000000..e5a3514 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/response_handlers.py @@ -0,0 +1,191 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging +from xml.etree.ElementTree import Element + +from azure.core.pipeline.policies import ContentDecodePolicy +from azure.core.exceptions import ( + HttpResponseError, + ResourceNotFoundError, + ResourceModifiedError, + ResourceExistsError, + ClientAuthenticationError, + DecodeError) + +from .parser import _to_utc_datetime +from .models import StorageErrorCode, UserDelegationKey, get_enum_value + +if TYPE_CHECKING: + from datetime import datetime + from azure.core.exceptions import AzureError + + +_LOGGER = logging.getLogger(__name__) + + +class PartialBatchErrorException(HttpResponseError): + """There is a partial failure in batch operations. + + :param str message: The message of the exception. + :param response: Server response to be deserialized. + :param list parts: A list of the parts in multipart response. 
+ """ + + def __init__(self, message, response, parts): + self.parts = parts + super(PartialBatchErrorException, self).__init__(message=message, response=response) + + +def parse_length_from_content_range(content_range): + ''' + Parses the blob length from the content range header: bytes 1-3/65537 + ''' + if content_range is None: + return None + + # First, split in space and take the second half: '1-3/65537' + # Next, split on slash and take the second half: '65537' + # Finally, convert to an int: 65537 + return int(content_range.split(' ', 1)[1].split('/', 1)[1]) + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = get_enum_value(value) + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers) + + +def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers), deserialized + + +def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return response.http_response.location_mode, deserialized + + +def process_storage_error(storage_error): # pylint:disable=too-many-statements + raise_error = HttpResponseError + serialized = False + if not storage_error.response: + raise storage_error + # If it is one of those three then it has been serialized prior by the generated layer. 
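As a quick sanity check of the response helpers above, a hypothetical REPL session (header values are made up, and get_enum_value is assumed to pass plain strings through unchanged):

    assert parse_length_from_content_range("bytes 1-3/65537") == 65537
    print(normalize_headers({"x-ms-error-code": "BlobNotFound", "ETag": '"0x8D"'}))
    # -> {'error_code': 'BlobNotFound', 'etag': '"0x8D"'}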
+    if isinstance(storage_error, (PartialBatchErrorException,
+                                  ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)):
+        serialized = True
+    error_code = storage_error.response.headers.get('x-ms-error-code')
+    error_message = storage_error.message
+    additional_data = {}
+    error_dict = {}
+    try:
+        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+        # If it is an XML response
+        if isinstance(error_body, Element):
+            error_dict = {
+                child.tag.lower(): child.text
+                for child in error_body
+            }
+        # If it is a JSON response
+        elif isinstance(error_body, dict):
+            error_dict = error_body.get('error', {})
+        elif not error_code:
+            _LOGGER.warning(
+                'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.',
+                type(error_body))
+            error_dict = {'message': str(error_body)}
+
+        # If we extracted from a JSON or XML response
+        if error_dict:
+            error_code = error_dict.get('code')
+            error_message = error_dict.get('message')
+            additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}}
+    except DecodeError:
+        pass
+
+    try:
+        # This check would be unnecessary if we have already serialized the error
+        if error_code and not serialized:
+            error_code = StorageErrorCode(error_code)
+            if error_code in [StorageErrorCode.condition_not_met,
+                              StorageErrorCode.blob_overwritten]:
+                raise_error = ResourceModifiedError
+            if error_code in [StorageErrorCode.invalid_authentication_info,
+                              StorageErrorCode.authentication_failed]:
+                raise_error = ClientAuthenticationError
+            if error_code in [StorageErrorCode.resource_not_found,
+                              StorageErrorCode.cannot_verify_copy_source,
+                              StorageErrorCode.blob_not_found,
+                              StorageErrorCode.queue_not_found,
+                              StorageErrorCode.container_not_found,
+                              StorageErrorCode.parent_not_found,
+                              StorageErrorCode.share_not_found]:
+                raise_error = ResourceNotFoundError
+            if error_code in [StorageErrorCode.account_already_exists,
+                              StorageErrorCode.account_being_created,
+                              StorageErrorCode.resource_already_exists,
+                              StorageErrorCode.resource_type_mismatch,
+                              StorageErrorCode.blob_already_exists,
+                              StorageErrorCode.queue_already_exists,
+                              StorageErrorCode.container_already_exists,
+                              StorageErrorCode.container_being_deleted,
+                              StorageErrorCode.queue_being_deleted,
+                              StorageErrorCode.share_already_exists,
+                              StorageErrorCode.share_being_deleted]:
+                raise_error = ResourceExistsError
+    except ValueError:
+        # Got an unknown error code
+        pass
+
+    # Error message should include all the error properties
+    try:
+        error_message += "\nErrorCode:{}".format(error_code.value)
+    except AttributeError:
+        error_message += "\nErrorCode:{}".format(error_code)
+    for name, info in additional_data.items():
+        error_message += "\n{}:{}".format(name, info)
+
+    # No need to create an instance if it has already been serialized by the generated layer
+    if serialized:
+        storage_error.message = error_message
+        error = storage_error
+    else:
+        error = raise_error(message=error_message, response=storage_error.response)
+    # Ensure these properties are stored in the error instance as well (not just the error message)
+    error.error_code = error_code
+    error.additional_info = additional_data
+    # error.args is what's surfaced on the traceback - show error message in all cases
+    error.args = (error.message,)
+    try:
+        # `from None` prevents us from double printing the exception (suppresses generated layer error context)
+        exec("raise error from None")  # pylint: disable=exec-used # nosec
+    except SyntaxError:
+        raise error
+
+
+def
parse_to_internal_user_delegation_key(service_user_delegation_key): + internal_user_delegation_key = UserDelegationKey() + internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid + internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid + internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) + internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) + internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service + internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version + internal_user_delegation_key.value = service_user_delegation_key.value + return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/shared_access_signature.py new file mode 100644 index 0000000..07aad5f --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/shared_access_signature.py @@ -0,0 +1,220 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import date + +from .parser import _str, _to_utc_datetime +from .constants import X_MS_VERSION +from . import sign_string, url_quote + + +class QueryStringConstants(object): + SIGNED_SIGNATURE = 'sig' + SIGNED_PERMISSION = 'sp' + SIGNED_START = 'st' + SIGNED_EXPIRY = 'se' + SIGNED_RESOURCE = 'sr' + SIGNED_IDENTIFIER = 'si' + SIGNED_IP = 'sip' + SIGNED_PROTOCOL = 'spr' + SIGNED_VERSION = 'sv' + SIGNED_CACHE_CONTROL = 'rscc' + SIGNED_CONTENT_DISPOSITION = 'rscd' + SIGNED_CONTENT_ENCODING = 'rsce' + SIGNED_CONTENT_LANGUAGE = 'rscl' + SIGNED_CONTENT_TYPE = 'rsct' + START_PK = 'spk' + START_RK = 'srk' + END_PK = 'epk' + END_RK = 'erk' + SIGNED_RESOURCE_TYPES = 'srt' + SIGNED_SERVICES = 'ss' + SIGNED_OID = 'skoid' + SIGNED_TID = 'sktid' + SIGNED_KEY_START = 'skt' + SIGNED_KEY_EXPIRY = 'ske' + SIGNED_KEY_SERVICE = 'sks' + SIGNED_KEY_VERSION = 'skv' + + # for ADLS + SIGNED_AUTHORIZED_OID = 'saoid' + SIGNED_UNAUTHORIZED_OID = 'suoid' + SIGNED_CORRELATION_ID = 'scid' + SIGNED_DIRECTORY_DEPTH = 'sdd' + + @staticmethod + def to_list(): + return [ + QueryStringConstants.SIGNED_SIGNATURE, + QueryStringConstants.SIGNED_PERMISSION, + QueryStringConstants.SIGNED_START, + QueryStringConstants.SIGNED_EXPIRY, + QueryStringConstants.SIGNED_RESOURCE, + QueryStringConstants.SIGNED_IDENTIFIER, + QueryStringConstants.SIGNED_IP, + QueryStringConstants.SIGNED_PROTOCOL, + QueryStringConstants.SIGNED_VERSION, + QueryStringConstants.SIGNED_CACHE_CONTROL, + QueryStringConstants.SIGNED_CONTENT_DISPOSITION, + QueryStringConstants.SIGNED_CONTENT_ENCODING, + QueryStringConstants.SIGNED_CONTENT_LANGUAGE, + QueryStringConstants.SIGNED_CONTENT_TYPE, + QueryStringConstants.START_PK, + QueryStringConstants.START_RK, + QueryStringConstants.END_PK, + QueryStringConstants.END_RK, + QueryStringConstants.SIGNED_RESOURCE_TYPES, + QueryStringConstants.SIGNED_SERVICES, + QueryStringConstants.SIGNED_OID, + QueryStringConstants.SIGNED_TID, + QueryStringConstants.SIGNED_KEY_START, + QueryStringConstants.SIGNED_KEY_EXPIRY, + QueryStringConstants.SIGNED_KEY_SERVICE, + QueryStringConstants.SIGNED_KEY_VERSION, + # for 
ADLS + QueryStringConstants.SIGNED_AUTHORIZED_OID, + QueryStringConstants.SIGNED_UNAUTHORIZED_OID, + QueryStringConstants.SIGNED_CORRELATION_ID, + QueryStringConstants.SIGNED_DIRECTORY_DEPTH, + ] + + +class SharedAccessSignature(object): + ''' + Provides a factory for creating account access + signature tokens with an account name and account key. Users can either + use the factory or can construct the appropriate service and use the + generate_*_shared_access_signature method directly. + ''' + + def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): + ''' + :param str account_name: + The storage account name used to generate the shared access signatures. + :param str account_key: + The access key to generate the shares access signatures. + :param str x_ms_version: + The service version used to generate the shared access signatures. + ''' + self.account_name = account_name + self.account_key = account_key + self.x_ms_version = x_ms_version + + def generate_account(self, services, resource_types, permission, expiry, start=None, + ip=None, protocol=None): + ''' + Generates a shared access signature for the account. + Use the returned signature with the sas_token parameter of the service + or to create a new account object. + + :param ResourceTypes resource_types: + Specifies the resource types that are accessible with the account + SAS. You can combine values to provide access to more than one + resource type. + :param AccountSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. You can combine + values to provide more than one permission. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. 
+ ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _str(val) if val is not None else None + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(QueryStringConstants.SIGNED_START, start) + self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(QueryStringConstants.SIGNED_IP, ip) + self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) + + def add_id(self, policy_id): + self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) + + def add_account(self, services, resource_types): + self._add_query(QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) + self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION)) + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + def get_token(self): + return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads.py new file mode 100644 index 0000000..941a90f --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads.py @@ -0,0 +1,603 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
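The string-to-sign assembled by add_account_signature above is the newline-joined field list that the service verifies with HMAC-SHA256. A minimal sketch of driving SharedAccessSignature directly (the account name and base64 key are placeholders; callers normally use the public generate_account_sas helper instead):

    from datetime import datetime, timedelta

    sas = SharedAccessSignature("myaccount", "bXlhY2NvdW50a2V5")  # placeholder credentials
    token = sas.generate_account(
        services="b",          # blob service only
        resource_types="sco",  # service, container and object APIs
        permission="rl",       # read + list
        expiry=datetime.utcnow() + timedelta(hours=1),
    )
    # token is a query string of the form "se=...&sp=rl&sv=...&ss=b&srt=sco&sig=..."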
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from concurrent import futures +from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) +from threading import Lock +from itertools import islice +from math import ceil + +import six + +from azure.core.tracing.common import with_current_context + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." + + +def _parallel_uploads(executor, uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(executor.submit(with_current_context(uploader), next_chunk)) + except StopIteration: + break + + # Wait for the remaining uploads to finish + done, _running = futures.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + validate_content=None, + encryption_options=None, + **kwargs): + + if encryption_options: + encryptor, padder = get_blob_encryptor_and_padder( + encryption_options.get('cek'), + encryption_options.get('vector'), + uploader_class is not PageBlobChunkUploader) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + validate_content=validate_content, + **kwargs) + if parallel: + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + executor.submit(with_current_context(uploader.process_chunk), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + executor.submit(with_current_context(uploader.process_substream_block), u) + for u in 
islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] + if any(range_ids): + return sorted(range_ids) + return [] + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b"" + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError("Blob data should be of type bytes.") + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b"" or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + def _update_progress(self, length): + if self.progress_lock is not None: + with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = self._upload_chunk(chunk_offset, chunk_data) + self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + def process_substream_block(self, block_data): + return self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + def 
_upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_substream_block_with_progress(self, index, block_stream): + range_id = self._upload_substream_block(index, block_stream) + self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop("modified_access_conditions", None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return index, block_id + + def _upload_substream_block(self, index, block_stream): + try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) + self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + return not any(bytearray(chunk_data)) + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + self.current_length = int(self.response_headers["blob_append_offset"]) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + 
**self.request_options + ) + + def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + self.response_headers = self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): + try: + self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response + + # TODO: Implement this method. + def _upload_substream_block(self, index, block_stream): + pass + + +class SubStream(IOBase): + + def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): + # Python 2.7: file-like objects created with open() typically support seek(), but are not + # derivations of io.IOBase and thus do not implement seekable(). + # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
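SubStream gives each parallel block upload its own bounded, seekable window over a single shared source stream; get_substream_blocks above constructs exactly these views, one per block. A short sketch (the file name and sizes are illustrative only):

    import threading

    lock = threading.Lock()
    chunk = 4 * 1024 * 1024
    with open("data.bin", "rb") as source:  # hypothetical local file
        first = SubStream(source, 0, chunk, lock)
        second = SubStream(source, chunk, chunk, lock)
        print(len(first), first.read(16))   # two independent 4 MiB windows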
+ try: + # only the main thread runs this, so there's no need grabbing the lock + wrapped_stream.seek(0, SEEK_CUR) + except: + raise ValueError("Wrapped stream must support seek().") + + self._lock = lockObj + self._wrapped_stream = wrapped_stream + self._position = 0 + self._stream_begin_index = stream_begin_index + self._length = length + self._buffer = BytesIO() + + # we must avoid buffering more than necessary, and also not use up too much memory + # so the max buffer size is capped at 4MB + self._max_buffer_size = ( + length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE + ) + self._current_buffer_start = 0 + self._current_buffer_size = 0 + super(SubStream, self).__init__() + + def __len__(self): + return self._length + + def close(self): + if self._buffer: + self._buffer.close() + self._wrapped_stream = None + IOBase.close(self) + + def fileno(self): + return self._wrapped_stream.fileno() + + def flush(self): + pass + + def read(self, size=None): + if self.closed: # pylint: disable=using-constant-test + raise ValueError("Stream is closed.") + + if size is None: + size = self._length - self._position + + # adjust if out of bounds + if size + self._position >= self._length: + size = self._length - self._position + + # return fast + if size == 0 or self._buffer.closed: + return b"" + + # attempt first read from the read buffer and update position + read_buffer = self._buffer.read(size) + bytes_read = len(read_buffer) + bytes_remaining = size - bytes_read + self._position += bytes_read + + # repopulate the read buffer from the underlying stream to fulfill the request + # ensure the seek and read operations are done atomically (only if a lock is provided) + if bytes_remaining > 0: + with self._buffer: + # either read in the max buffer size specified on the class + # or read in just enough data for the current block/sub stream + current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) + + # lock is only defined if max_concurrency > 1 (parallel uploads) + if self._lock: + with self._lock: + # reposition the underlying stream to match the start of the data to read + absolute_position = self._stream_begin_index + self._position + self._wrapped_stream.seek(absolute_position, SEEK_SET) + # If we can't seek to the right location, our read will be corrupted so fail fast. + if self._wrapped_stream.tell() != absolute_position: + raise IOError("Stream failed to seek to the desired location.") + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + else: + absolute_position = self._stream_begin_index + self._position + # It's possible that there's connection problem during data transfer, + # so when we retry we don't want to read from current position of wrapped stream, + # instead we should seek to where we want to read from. 
+ if self._wrapped_stream.tell() != absolute_position: + self._wrapped_stream.seek(absolute_position, SEEK_SET) + + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + + if buffer_from_stream: + # update the buffer with new data from the wrapped stream + # we need to note down the start position and size of the buffer, in case seek is performed later + self._buffer = BytesIO(buffer_from_stream) + self._current_buffer_start = self._position + self._current_buffer_size = len(buffer_from_stream) + + # read the remaining bytes from the new buffer and update position + second_read_buffer = self._buffer.read(bytes_remaining) + read_buffer += second_read_buffer + self._position += len(second_read_buffer) + + return read_buffer + + def readable(self): + return True + + def readinto(self, b): + raise UnsupportedOperation + + def seek(self, offset, whence=0): + if whence is SEEK_SET: + start_index = 0 + elif whence is SEEK_CUR: + start_index = self._position + elif whence is SEEK_END: + start_index = self._length + offset = -offset + else: + raise ValueError("Invalid argument for the 'whence' parameter.") + + pos = start_index + offset + + if pos > self._length: + pos = self._length + elif pos < 0: + pos = 0 + + # check if buffer is still valid + # if not, drop buffer + if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: + self._buffer.close() + self._buffer = BytesIO() + else: # if yes seek to correct position + delta = pos - self._current_buffer_start + self._buffer.seek(delta, SEEK_SET) + + self._position = pos + return pos + + def seekable(self): + return True + + def tell(self): + return self._position + + def write(self): + raise UnsupportedOperation + + def writelines(self): + raise UnsupportedOperation + + def writeable(self): + return False + + +class IterStreamer(object): + """ + File-like streaming iterator. + """ + + def __init__(self, generator, encoding="UTF-8"): + self.generator = generator + self.iterator = iter(generator) + self.leftover = b"" + self.encoding = encoding + + def __len__(self): + return self.generator.__len__() + + def __iter__(self): + return self.iterator + + def seekable(self): + return False + + def __next__(self): + return next(self.iterator) + + next = __next__ # Python 2 compatibility. + + def tell(self, *args, **kwargs): + raise UnsupportedOperation("Data generator does not support tell.") + + def seek(self, *args, **kwargs): + raise UnsupportedOperation("Data generator is unseekable.") + + def read(self, size): + data = self.leftover + count = len(self.leftover) + try: + while count < size: + chunk = self.__next__() + if isinstance(chunk, six.text_type): + chunk = chunk.encode(self.encoding) + data += chunk + count += len(chunk) + # This means count < size and what's leftover will be returned in this call. + except StopIteration: + self.leftover = b"" + + if count >= size: + self.leftover = data[size:] + + return data[:size] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads_async.py new file mode 100644 index 0000000..5ed192b --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared/uploads_async.py @@ -0,0 +1,395 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
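IterStreamer, defined at the end of uploads.py above, adapts a bytes generator to the minimal file-like surface the uploaders consume (read plus iteration), buffering any leftover bytes between calls. A small self-contained check:

    def chunks():
        yield b"hello "
        yield b"world"

    stream = IterStreamer(chunks())
    print(stream.read(8))  # b'hello wo' - spans two generator items
    print(stream.read(8))  # b'rld'      - drains the leftover after the generator ends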
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +import asyncio +from asyncio import Lock +from itertools import islice +import threading + +from math import ceil + +import six + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder +from .uploads import SubStream, IterStreamer # pylint: disable=unused-import + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' + + +async def _parallel_uploads(uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(asyncio.ensure_future(uploader(next_chunk))) + except StopIteration: + break + + # Wait for the remaining uploads to finish + if running: + done, _running = await asyncio.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +async def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + encryption_options=None, + **kwargs): + + if encryption_options: + encryptor, padder = get_blob_encryptor_and_padder( + encryption_options.get('cek'), + encryption_options.get('vector'), + uploader_class is not PageBlobChunkUploader) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + asyncio.ensure_future(uploader.process_chunk(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [] + for chunk in uploader.get_chunk_streams(): + range_ids.append(await uploader.process_chunk(chunk)) + + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +async def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + asyncio.ensure_future(uploader.process_substream_block(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [] + for block in uploader.get_substream_blocks(): + range_ids.append(await 
uploader.process_substream_block(block)) + if any(range_ids): + return sorted(range_ids) + return + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = threading.Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b'' + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b'' or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + async def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + async def _update_progress(self, length): + if self.progress_lock is not None: + async with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + async def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = await self._upload_chunk(chunk_offset, chunk_data) + await self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + async def process_substream_block(self, block_data): + return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + async def _upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + async def 
_upload_substream_block_with_progress(self, index, block_stream): + range_id = await self._upload_substream_block(index, block_stream) + await self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop('modified_access_conditions', None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + await self.service.stage_block( + block_id, + len(chunk_data), + body=chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + return index, block_id + + async def _upload_substream_block(self, index, block_stream): + try: + block_id = 'BlockId{}'.format("%05d" % (index // self.chunk_size)) + await self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + for each_byte in chunk_data: + if each_byte not in [0, b'\x00']: + return False + return True + + async def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = await self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + async def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + self.current_length = int(self.response_headers['blob_append_offset']) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, +
**self.request_options) + + async def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + self.response_headers = await self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + async def _upload_substream_block(self, index, block_stream): + try: + await self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = await self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + return range_id, response + + # TODO: Implement this method. + async def _upload_substream_block(self, index, block_stream): + pass diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_shared_access_signature.py b/azure/multiapi/storagev2/blob/v2020_10_02/_shared_access_signature.py new file mode 100644 index 0000000..890ef1b --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_shared_access_signature.py @@ -0,0 +1,596 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, TYPE_CHECKING +) + +from ._shared import sign_string, url_quote +from ._shared.constants import X_MS_VERSION +from ._shared.models import Services +from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ + QueryStringConstants + +if TYPE_CHECKING: + from datetime import datetime + from ..blob import ( + ResourceTypes, + AccountSasPermissions, + UserDelegationKey, + ContainerSasPermissions, + BlobSasPermissions + ) + + +class BlobQueryStringConstants(object): + SIGNED_TIMESTAMP = 'snapshot' + + +class BlobSharedAccessSignature(SharedAccessSignature): + ''' + Provides a factory for creating blob and container access + signature tokens with a common account name and account key. Users can either + use the factory or can construct the appropriate service and use the + generate_*_shared_access_signature method directly. + ''' + + def __init__(self, account_name, account_key=None, user_delegation_key=None): + ''' + :param str account_name: + The storage account name used to generate the shared access signatures. + :param str account_key: + The access key to generate the shared access signatures.
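A minimal sketch of constructing this signer with either credential mode; the account name, key, and delegation-key variable below are placeholders, and the import path assumes this vendored module layout:

    from azure.multiapi.storagev2.blob.v2020_10_02._shared_access_signature import (
        BlobSharedAccessSignature,
    )

    # Shared-key signing: pass the storage account's access key.
    signer = BlobSharedAccessSignature('myaccount', account_key='<account-key>')

    # User-delegation signing: pass a key previously obtained via
    # BlobServiceClient.get_user_delegation_key (hypothetical variable).
    # signer = BlobSharedAccessSignature('myaccount', user_delegation_key=delegation_key)

    token = signer.generate_container(
        'mycontainer', permission='rl', expiry='2021-01-02T00:00:00Z')

The module-level generate_*_sas helpers further down in this file wrap this same construction.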
+ :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: + Instead of an account key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling get_user_delegation_key on any Blob service object. + ''' + super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) + self.user_delegation_key = user_delegation_key + + def generate_blob(self, container_name, blob_name, snapshot=None, version_id=None, permission=None, + expiry=None, start=None, policy_id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None, **kwargs): + ''' + Generates a shared access signature for the blob or one of its snapshots. + Use the returned signature with the sas_token parameter of any BlobService. + + :param str container_name: + Name of container. + :param str blob_name: + Name of blob. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to grant permission. + :param BlobSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_blob_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. 
+ :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + resource_path = container_name + '/' + blob_name + + sas = _BlobSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + + resource = 'bs' if snapshot else 'b' + resource = 'bv' if version_id else resource + resource = 'd' if kwargs.pop("is_directory", None) else resource + sas.add_resource(resource) + + sas.add_timestamp(snapshot or version_id) + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_info_for_hns_account(**kwargs) + sas.add_resource_signature(self.account_name, self.account_key, resource_path, + user_delegation_key=self.user_delegation_key) + + return sas.get_token() + + def generate_container(self, container_name, permission=None, expiry=None, + start=None, policy_id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None, **kwargs): + ''' + Generates a shared access signature for the container. + Use the returned signature with the sas_token parameter of any BlobService. + + :param str container_name: + Name of container. + :param ContainerSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_blob_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. 
+ :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + sas = _BlobSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + sas.add_resource('c') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_info_for_hns_account(**kwargs) + sas.add_resource_signature(self.account_name, self.account_key, container_name, + user_delegation_key=self.user_delegation_key) + return sas.get_token() + + +class _BlobSharedAccessHelper(_SharedAccessHelper): + + def add_timestamp(self, timestamp): + self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) + + def add_info_for_hns_account(self, **kwargs): + self._add_query(QueryStringConstants.SIGNED_DIRECTORY_DEPTH, kwargs.pop('sdd', None)) + self._add_query(QueryStringConstants.SIGNED_AUTHORIZED_OID, kwargs.pop('preauthorized_agent_object_id', None)) + self._add_query(QueryStringConstants.SIGNED_UNAUTHORIZED_OID, kwargs.pop('agent_object_id', None)) + self._add_query(QueryStringConstants.SIGNED_CORRELATION_ID, kwargs.pop('correlation_id', None)) + + def get_value_to_append(self, query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): + # pylint: disable = no-member + if path[0] != '/': + path = '/' + path + + canonicalized_resource = '/blob/' + account_name + path + '\n' + + # Form the string to sign from shared_access_policy and canonicalized + # resource. The order of values is important. 
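Conceptually, the value that ends up in the sig query parameter is an HMAC-SHA256 over this newline-delimited string-to-sign. Below is a sketch of what the signing step amounts to, assuming (as in the SDK) a base64-encoded account key; sign_string_sketch is an illustrative toy, not the package's _shared.sign_string helper:

    import base64
    import hashlib
    import hmac

    def sign_string_sketch(account_key_b64, string_to_sign):
        # Decode the base64 account key, HMAC-SHA256 the UTF-8 encoded
        # string-to-sign, and base64-encode the digest.
        key = base64.b64decode(account_key_b64)
        digest = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha256).digest()
        return base64.b64encode(digest).decode('utf-8')

    # Each signed field contributes one newline-terminated value, in a fixed order:
    fields = ['r', '', '2021-01-01T00:00:00Z', '/blob/myaccount/mycontainer/myblob']
    print(sign_string_sketch('bXlzZWNyZXRrZXk=', '\n'.join(fields)))

Because the service rebuilds the same string from the query parameters, any reordering or omission invalidates the signature, which is why the order of values matters here.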
+ string_to_sign = \ + (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_START) + + self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + canonicalized_resource) + + if user_delegation_key is not None: + self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) + self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) + self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) + self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) + self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) + self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) + + string_to_sign += \ + (self.get_value_to_append(QueryStringConstants.SIGNED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_TID) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID)) + else: + string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + + string_to_sign += \ + (self.get_value_to_append(QueryStringConstants.SIGNED_IP) + + self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) + + self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) + + self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) + + # remove the trailing newline + if string_to_sign[-1] == '\n': + string_to_sign = string_to_sign[:-1] + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key if user_delegation_key is None else user_delegation_key.value, + string_to_sign)) + + def get_token(self): + # a conscious decision was made to exclude the timestamp in the generated token + # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp + exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP] + return '&'.join(['{0}={1}'.format(n, url_quote(v)) + for n, v in self.query_dict.items() if v is not None and n not in exclude]) + + +def generate_account_sas( + account_name, # type: str + account_key, # type: str + resource_types, # type: Union[ResourceTypes, str] + permission, # type: Union[AccountSasPermissions, str] + expiry, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): # type: (...) -> str + """Generates a shared access signature for the blob service. + + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. 
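A minimal usage sketch, assuming the v2020_10_02 package exports the same helpers as the v2020_06_12 __init__ earlier in this diff; the account name and key are placeholders:

    from datetime import datetime, timedelta
    from azure.multiapi.storagev2.blob.v2020_10_02 import (
        AccountSasPermissions, BlobServiceClient, ResourceTypes, generate_account_sas)

    sas_token = generate_account_sas(
        account_name='myaccount',
        account_key='<account-key>',
        resource_types=ResourceTypes(service=True, container=True, object=True),
        permission=AccountSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1))

    # The resulting token is accepted anywhere a credential is:
    service = BlobServiceClient('https://myaccount.blob.core.windows.net', credential=sas_token)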
+ + :param str account_name: + The storage account name used to generate the shared access signature. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param resource_types: + Specifies the resource types that are accessible with the account SAS. + :type resource_types: str or ~azure.storage.blob.ResourceTypes + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.AccountSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_sas_token] + :end-before: [END create_sas_token] + :language: python + :dedent: 8 + :caption: Generating a shared access signature. + """ + sas = SharedAccessSignature(account_name, account_key) + return sas.generate_account( + services=Services(blob=True), + resource_types=resource_types, + permission=permission, + expiry=expiry, + start=start, + ip=ip, + **kwargs + ) # type: ignore + + +def generate_container_sas( + account_name, # type: str + container_name, # type: str + account_key=None, # type: Optional[str] + user_delegation_key=None, # type: Optional[UserDelegationKey] + permission=None, # type: Optional[Union[ContainerSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Any + """Generates a shared access signature for a container. + + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str container_name: + The name of the container. 
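A minimal usage sketch, with placeholders throughout and the same export assumption as above:

    from datetime import datetime, timedelta
    from azure.multiapi.storagev2.blob.v2020_10_02 import (
        ContainerSasPermissions, generate_container_sas)

    token = generate_container_sas(
        account_name='myaccount',
        container_name='mycontainer',
        account_key='<account-key>',
        permission=ContainerSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1))

    container_url = 'https://myaccount.blob.core.windows.net/mycontainer?' + token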
+ :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + Either `account_key` or `user_delegation_key` must be specified. + :param ~azure.storage.blob.UserDelegationKey user_delegation_key: + Instead of an account shared key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. + When present, the SAS is signed with the user delegation key instead. + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.ContainerSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START generate_sas_token] + :end-before: [END generate_sas_token] + :language: python + :dedent: 12 + :caption: Generating a sas token. + """ + if not user_delegation_key and not account_key: + raise ValueError("Either user_delegation_key or account_key must be provided.") + + if user_delegation_key: + sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) + else: + sas = BlobSharedAccessSignature(account_name, account_key=account_key) + return sas.generate_container( + container_name, + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) + + +def generate_blob_sas( + account_name, # type: str + container_name, # type: str + blob_name, # type: str + snapshot=None, # type: Optional[str] + account_key=None, # type: Optional[str] + user_delegation_key=None, # type: Optional[UserDelegationKey] + permission=None, # type: Optional[Union[BlobSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Any + """Generates a shared access signature for a blob. + + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str container_name: + The name of the container. + :param str blob_name: + The name of the blob. + :param str snapshot: + An optional blob snapshot ID. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + Either `account_key` or `user_delegation_key` must be specified. + :param ~azure.storage.blob.UserDelegationKey user_delegation_key: + Instead of an account shared key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. + When present, the SAS is signed with the user delegation key instead. + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.BlobSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. 
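A minimal usage sketch, with placeholders throughout:

    from datetime import datetime, timedelta
    from azure.multiapi.storagev2.blob.v2020_10_02 import BlobSasPermissions, generate_blob_sas

    token = generate_blob_sas(
        account_name='myaccount',
        container_name='mycontainer',
        blob_name='hello.txt',
        account_key='<account-key>',
        permission=BlobSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(minutes=30))

    blob_url = 'https://myaccount.blob.core.windows.net/mycontainer/hello.txt?' + token

Passing user_delegation_key=... instead of account_key switches the same call to user-delegation signing.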
+ :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str version_id: + An optional blob version ID. This parameter only applies to versioning-enabled accounts. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + if not user_delegation_key and not account_key: + raise ValueError("Either user_delegation_key or account_key must be provided.") + version_id = kwargs.pop('version_id', None) + if version_id and snapshot: + raise ValueError("snapshot and version_id cannot be set at the same time.") + if user_delegation_key: + sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) + else: + sas = BlobSharedAccessSignature(account_name, account_key=account_key) + return sas.generate_blob( + container_name, + blob_name, + snapshot=snapshot, + version_id=version_id, + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_10_02/_upload_helpers.py new file mode 100644 index 0000000..30d5bfa --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_upload_helpers.py @@ -0,0 +1,306 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information.
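The upload_block_blob helper in this new module chooses between a single Put Blob call and a stage-blocks-then-commit path by comparing the (padding-adjusted) size against max_single_put_size. A toy version of that decision; the function name and the 64MB default (the value documented for max_single_put_size elsewhere in this diff) are illustrative:

    def choose_upload_strategy(size, max_single_put_size=64 * 1024 * 1024):
        # At or below the threshold, one PUT suffices; above it, or when the
        # size is unknown, blocks are staged and then committed as a list.
        if size is not None and size <= max_single_put_size:
            return 'single_put'
        return 'stage_blocks_then_commit'

    assert choose_upload_strategy(4 * 1024 * 1024) == 'single_put'
    assert choose_upload_strategy(None) == 'stage_blocks_then_commit'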
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from io import SEEK_SET, UnsupportedOperation +from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import + +import six +from azure.core.exceptions import ResourceExistsError, ResourceModifiedError, HttpResponseError + +from ._shared.response_handlers import ( + process_storage_error, + return_response_headers) +from ._shared.models import StorageErrorCode +from ._shared.uploads import ( + upload_data_chunks, + upload_substream_blocks, + BlockBlobChunkUploader, + PageBlobChunkUploader, + AppendBlobChunkUploader) +from ._shared.encryption import generate_blob_encryption_data, encrypt_blob +from ._generated.models import ( + BlockLookupList, + AppendPositionAccessConditions, + ModifiedAccessConditions, +) + +if TYPE_CHECKING: + from datetime import datetime # pylint: disable=unused-import + BlobLeaseClient = TypeVar("BlobLeaseClient") + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' + + +def _convert_mod_error(error): + message = error.message.replace( + "The condition specified using HTTP conditional header(s) is not met.", + "The specified blob already exists.") + message = message.replace("ConditionNotMet", "BlobAlreadyExists") + overwrite_error = ResourceExistsError( + message=message, + response=error.response, + error=error) + overwrite_error.error_code = StorageErrorCode.blob_already_exists + raise overwrite_error + + +def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument + return any([ + modified_access_conditions.if_modified_since, + modified_access_conditions.if_unmodified_since, + modified_access_conditions.if_none_match, + modified_access_conditions.if_match + ]) + + +def upload_block_blob( # pylint: disable=too-many-locals + client=None, + data=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + adjusted_count = length + if (encryption_options.get('key') is not None) and (adjusted_count is not None): + adjusted_count += (16 - (length % 16)) + blob_headers = kwargs.pop('blob_headers', None) + tier = kwargs.pop('standard_blob_tier', None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + immutability_policy = kwargs.pop('immutability_policy', None) + immutability_policy_expiry = None if immutability_policy is None else immutability_policy.expiry_time + immutability_policy_mode = None if immutability_policy is None else immutability_policy.policy_mode + legal_hold = kwargs.pop('legal_hold', None) + + # Do single put if the size is smaller than or equal config.max_single_put_size + if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): + try: + data = data.read(length) + if not isinstance(data, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + except AttributeError: + pass + if encryption_options.get('key'): + encryption_data, data = encrypt_blob(data, encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + return client.upload( + body=data, + content_length=adjusted_count, + blob_http_headers=blob_headers, + headers=headers, + 
cls=return_response_headers, + validate_content=validate_content, + data_stream_total=adjusted_count, + upload_stream_current=0, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + **kwargs) + + use_original_upload_path = blob_settings.use_byte_buffer or \ + validate_content or encryption_options.get('required') or \ + blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ + hasattr(stream, 'seekable') and not stream.seekable() or \ + not hasattr(stream, 'seek') or not hasattr(stream, 'tell') + + if use_original_upload_path: + if encryption_options.get('key'): + cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + encryption_options['cek'] = cek + encryption_options['vector'] = iv + block_ids = upload_data_chunks( + service=client, + uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + encryption_options=encryption_options, + headers=headers, + **kwargs + ) + else: + block_ids = upload_substream_blocks( + service=client, + uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + headers=headers, + **kwargs + ) + + block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) + block_lookup.latest = block_ids + return client.commit_block_list( + block_lookup, + blob_http_headers=blob_headers, + cls=return_response_headers, + validate_content=validate_content, + headers=headers, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + **kwargs) + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +def upload_page_blob( + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + if length is None or length < 0: + raise ValueError("A content length must be specified for a Page Blob.") + if length % 512 != 0: + raise ValueError("Invalid page blob size: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(length)) + if kwargs.get('premium_page_blob_tier'): + premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') + try: + headers['x-ms-access-tier'] = premium_page_blob_tier.value + except AttributeError: + headers['x-ms-access-tier'] = premium_page_blob_tier + if encryption_options and encryption_options.get('data'): + headers['x-ms-meta-encryptiondata'] = encryption_options['data'] + blob_tags_string = kwargs.pop('blob_tags_string', None) + + response = client.create( + content_length=0, + blob_content_length=length, + blob_sequence_number=None, + blob_http_headers=kwargs.pop('blob_headers', None), + blob_tags_string=blob_tags_string, + cls=return_response_headers, + headers=headers, + **kwargs) + if length == 0: + return response + + kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) + return upload_data_chunks( + service=client, + uploader_class=PageBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_page_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + encryption_options=encryption_options, + headers=headers, + **kwargs) + + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +def upload_append_blob( # pylint: disable=unused-argument + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if length == 0: + return {} + blob_headers = kwargs.pop('blob_headers', None) + append_conditions = AppendPositionAccessConditions( + max_size=kwargs.pop('maxsize_condition', None), + append_position=None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + try: + if overwrite: + client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + headers=headers, + **kwargs) + except HttpResponseError as error: + if error.response.status_code != 404: + raise + # rewind the request body if it is a stream + if hasattr(stream, 'read'): + try: + # attempt to rewind the body to the initial position + stream.seek(0, SEEK_SET) + except UnsupportedOperation: + # if body is not seekable, then retry would not work + raise error + client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/_version.py b/azure/multiapi/storagev2/blob/v2020_10_02/_version.py new file mode 100644 index 0000000..68dc953 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/_version.py @@ -0,0 +1,7 
@@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +VERSION = "12.9.0" diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/__init__.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/__init__.py new file mode 100644 index 0000000..33c1031 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/aio/__init__.py @@ -0,0 +1,141 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os + +from .._models import BlobType +from .._shared.policies_async import ExponentialRetry, LinearRetry +from ._blob_client_async import BlobClient +from ._container_client_async import ContainerClient +from ._blob_service_client_async import BlobServiceClient +from ._lease_async import BlobLeaseClient +from ._download_async import StorageStreamDownloader + + +async def upload_blob_to_url( + blob_url, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + credential=None, # type: Any + **kwargs): + # type: (...) -> dict[str, Any] + """Upload data to a given URL + + The data will be uploaded as a block blob. + + :param str blob_url: + The full URI to the blob. This can also include a SAS token. + :param data: + The data to upload. This can be bytes, text, an iterable or a file-like object. + :type data: bytes or str or Iterable + :param credential: + The credentials with which to authenticate. This is optional if the + blob URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword bool overwrite: + Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob_to_url will overwrite any existing data. If set to False, the + operation will fail with a ResourceExistsError. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword dict(str,str) metadata: + Name-value pairs associated with the blob as metadata. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
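A minimal end-to-end sketch of this helper; the URL and SAS token are placeholders:

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_10_02.aio import upload_blob_to_url

    async def main():
        url = 'https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas>'
        # Uploads as a block blob and returns the updated properties.
        result = await upload_blob_to_url(url, data=b'hello, world', overwrite=True)
        print(result['etag'])

    asyncio.run(main())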
+ :keyword str encoding: + Encoding to use if text is supplied as input. Defaults to UTF-8. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict(str, Any) + """ + async with BlobClient.from_blob_url(blob_url, credential=credential) as client: + return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) + + +async def _download_to_stream(client, handle, **kwargs): + """Download data to specified open file-handle.""" + stream = await client.download_blob(**kwargs) + await stream.readinto(handle) + + +async def download_blob_from_url( + blob_url, # type: str + output, # type: str + credential=None, # type: Any + **kwargs): + # type: (...) -> None + """Download the contents of a blob to a local file or stream. + + :param str blob_url: + The full URI to the blob. This can also include a SAS token. + :param output: + Where the data should be downloaded to. This could be either a file path to write to, + or an open IO handle to write to. + :type output: str or writable stream + :param credential: + The credentials with which to authenticate. This is optional if the + blob URL already has a SAS token or the blob is public. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword bool overwrite: + Whether the local file should be overwritten if it already exists. The default value is + `False` - in which case a ValueError will be raised if the file already exists. If set to + `True`, an attempt will be made to write to the existing file. If a stream handle is passed + in, this value is ignored. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :keyword int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. 
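A minimal end-to-end sketch of this helper; the URL, SAS token, and local path are placeholders:

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_10_02.aio import download_blob_from_url

    async def main():
        url = 'https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas>'
        # Writes the blob's contents to the given local file path.
        await download_blob_from_url(url, 'hello_local.txt', overwrite=True)

    asyncio.run(main())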
+ :rtype: None + """ + overwrite = kwargs.pop('overwrite', False) + async with BlobClient.from_blob_url(blob_url, credential=credential) as client: + if hasattr(output, 'write'): + await _download_to_stream(client, output, **kwargs) + else: + if not overwrite and os.path.isfile(output): + raise ValueError("The file '{}' already exists.".format(output)) + with open(output, 'wb') as file_handle: + await _download_to_stream(client, file_handle, **kwargs) + + +__all__ = [ + 'upload_blob_to_url', + 'download_blob_from_url', + 'BlobServiceClient', + 'ContainerClient', + 'BlobClient', + 'BlobLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'StorageStreamDownloader' +] diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_client_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_client_async.py new file mode 100644 index 0000000..97b412e --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_client_async.py @@ -0,0 +1,2617 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, invalid-overridden-method +from functools import partial +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, + TYPE_CHECKING +) + +from azure.core.pipeline import AsyncPipeline + +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.policies_async import ExponentialRetry +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._deserialize import get_page_ranges_result, parse_tags, deserialize_pipeline_response_into_cls +from .._serialize import get_modify_conditions, get_api_version, get_access_conditions +from .._generated.aio import AzureBlobStorage +from .._generated.models import CpkInfo +from .._deserialize import deserialize_blob_properties +from .._blob_client import BlobClient as BlobClientBase +from ._upload_helpers import ( + upload_block_blob, + upload_append_blob, + upload_page_blob) +from .._models import BlobType, BlobBlock, BlobProperties +from ._lease_async import BlobLeaseClient +from ._download_async import StorageStreamDownloader + + +if TYPE_CHECKING: + from datetime import datetime + from .._models import ( # pylint: disable=unused-import + ContentSettings, + PremiumPageBlobTier, + StandardBlobTier, + SequenceNumberAction + ) + + +class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disable=too-many-public-methods + """A client to interact with a specific blob, although that blob may not yet exist. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the blob, + use the :func:`from_blob_url` classmethod. + :param container_name: The container name for the blob. + :type container_name: str + :param blob_name: The name of the blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob_name: str + :param str snapshot: + The optional blob snapshot on which to operate. 
This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call; + any remainder is downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_client] + :end-before: [END create_blob_client] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a URL to a public blob (no auth needed). + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_client_sas_url] + :end-before: [END create_blob_client_sas_url] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a SAS URL to a blob. + """ + def __init__( + self, account_url, # type: str + container_name, # type: str + blob_name, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(BlobClient, self).__init__( + account_url, + container_name=container_name, + blob_name=blob_name, + snapshot=snapshot, + credential=credential, + **kwargs) + self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + + @distributed_trace_async + async def get_account_information(self, **kwargs): # type: ignore + # type: (Optional[int]) -> Dict[str, str] + """Gets information related to the storage account in which the blob resides.
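A short usage sketch of the async client and this method; the endpoint and credential are placeholders:

    import asyncio

    from azure.multiapi.storagev2.blob.v2020_10_02.aio import BlobClient

    async def main():
        async with BlobClient(
                'https://myaccount.blob.core.windows.net',
                container_name='mycontainer',
                blob_name='hello.txt',
                credential='<sas-token>') as blob:
            info = await blob.get_account_information()
            print(info['sku_name'], info['account_kind'])

    asyncio.run(main())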
+ + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + """ + try: + return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_blob_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Dict[str, Any] + """ + Creates a new Block Blob where the content of the blob is read from a given URL. + The content of an existing blob is overwritten with the new blob. + + :param str source_url: + A URL of up to 2 KB in length that specifies a file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. + :keyword bool include_source_blob_properties: + Indicates if properties from the source blob should be copied. Defaults to True. + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + :paramtype tags: dict(str, str) + :keyword bytearray source_content_md5: + Specify the md5 that is used to verify the integrity of the source bytes. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. + """ + options = self._upload_blob_from_url_options( + source_url=self._encode_source_url(source_url), + **kwargs) + try: + return await self._client.block_blob.put_blob_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_blob( + self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + blob_type=BlobType.BlockBlob, # type: Union[str, BlobType] + length=None, # type: Optional[int] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Any + """Creates a new blob from a data source with automatic chunking. + + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. + :param int length: + Number of bytes to read from the stream. 
This is optional, but
+            should be supplied for optimal performance.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword tags:
+            Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError. The exception to the above is with Append
+            blob types: if set to False and the data already exists, an error will not be raised
+            and the data will be appended to the existing blob. If overwrite=True is set, then the existing
+            append blob will be deleted, and a new one created. Defaults to False.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            If specified, upload_blob only succeeds if the
+            blob's lease is active and matches this ID.
+            Required if the blob has an active lease.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            ..
versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + Currently this parameter of upload_blob() API is for BlockBlob only. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + Currently this parameter of upload_blob() API is for BlockBlob only. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world_async.py + :start-after: [START upload_a_blob] + :end-before: [END upload_a_blob] + :language: python + :dedent: 16 + :caption: Upload a blob to the container. + """ + options = self._upload_blob_options( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + **kwargs) + if blob_type == BlobType.BlockBlob: + return await upload_block_blob(**options) + if blob_type == BlobType.PageBlob: + return await upload_page_blob(**options) + return await upload_append_blob(**options) + + @distributed_trace_async + async def download_blob(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. 
Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. + + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. 
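+        A minimal usage sketch of the three read modes named above (illustrative
+        only; ``blob_client`` is assumed to be an already-constructed ``BlobClient``
+        from this aio package):
+
+        .. code-block:: python
+
+            # Buffer the whole blob in memory.
+            downloader = await blob_client.download_blob()
+            data = await downloader.readall()
+
+            # Or stream it into an open, writable file handle.
+            with open("out.bin", "wb") as handle:
+                downloader = await blob_client.download_blob()
+                await downloader.readinto(handle)
+
+            # Or iterate the content chunk by chunk.
+            downloader = await blob_client.download_blob()
+            async for chunk in downloader.chunks():
+                ...  # consume each chunk of bytes
+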
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START download_a_blob]
+                :end-before: [END download_a_blob]
+                :language: python
+                :dedent: 16
+                :caption: Download a blob.
+        """
+        options = self._download_blob_options(
+            offset=offset,
+            length=length,
+            **kwargs)
+        downloader = StorageStreamDownloader(**options)
+        await downloader._setup()  # pylint: disable=protected-access
+        return downloader
+
+    @distributed_trace_async
+    async def delete_blob(self, delete_snapshots=None, **kwargs):
+        # type: (str, Any) -> None
+        """Marks the specified blob for deletion.
+
+        The blob is later deleted during garbage collection.
+        Note that in order to delete a blob, you must delete all of its
+        snapshots. You can delete both at the same time with the delete_blob()
+        operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blob
+        and retains the blob for a specified number of days.
+        After the specified number of days, the blob's data is removed from the service during garbage collection.
+        A soft-deleted blob is accessible through :func:`~ContainerClient.list_blobs()` with the
+        `include=['deleted']` option, and can be restored using the :func:`undelete` operation.
+
+        :param str delete_snapshots:
+            Required if the blob has associated snapshots. Values include:
+            - "only": Deletes only the blob's snapshots.
+            - "include": Deletes the blob along with all snapshots.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to delete.
+
+            .. versionadded:: 12.4.0
+                This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword lease:
+            Required if the blob has an active lease. If specified, delete_blob only
+            succeeds if the blob's lease is active and matches this ID. Value can be a
+            BlobLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            ..
literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START delete_blob]
+                :end-before: [END delete_blob]
+                :language: python
+                :dedent: 16
+                :caption: Delete a blob.
+        """
+        options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs)
+        try:
+            await self._client.blob.delete(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def undelete_blob(self, **kwargs):
+        # type: (Any) -> None
+        """Restores soft-deleted blobs or snapshots.
+
+        The operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START undelete_blob]
+                :end-before: [END undelete_blob]
+                :language: python
+                :dedent: 12
+                :caption: Undeleting a blob.
+        """
+        try:
+            await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a blob exists with the defined parameters, and returns
+        False otherwise.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to check if it exists.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: True if the blob exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            await self._client.blob.get_properties(
+                snapshot=self.snapshot,
+                **kwargs)
+            return True
+        # A blob encrypted with CPK fails get_properties without the key, but it exists.
+        except ResourceExistsError:
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace_async
+    async def get_blob_properties(self, **kwargs):
+        # type: (Any) -> BlobProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the blob. It does not return the content of the blob.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to get properties.
+
+            .. versionadded:: 12.4.0
+                This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*).
Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: BlobProperties + :rtype: ~azure.storage.blob.BlobProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] + :language: python + :dedent: 12 + :caption: Getting the properties for a blob. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + try: + cls_method = kwargs.pop('cls', None) + if cls_method: + kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) + blob_props = await self._client.blob.get_properties( + timeout=kwargs.pop('timeout', None), + version_id=kwargs.pop('version_id', None), + snapshot=self.snapshot, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=kwargs.pop('cls', None) or deserialize_blob_properties, + cpk_info=cpk_info, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + blob_props.name = self.blob_name + if isinstance(blob_props, BlobProperties): + blob_props.container = self.container_name + blob_props.snapshot = self.snapshot + return blob_props # type: ignore + + @distributed_trace_async + async def set_http_headers(self, content_settings=None, **kwargs): + # type: (Optional[ContentSettings], Any) -> None + """Sets system properties on the blob. + + If one property is set for the content_settings, all properties will be overridden. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: Dict[str, Any] + """ + options = self._set_http_headers_options(content_settings=content_settings, **kwargs) + try: + return await self._client.blob.set_http_headers(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_blob_metadata(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Sets user-defined metadata for the blob as one or more name-value pairs. + + :param metadata: + Dict containing name and value pairs. Each call to this operation + replaces all existing metadata attached to the blob. To remove all + metadata from the blob, call this operation with no metadata headers. + :type metadata: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. 
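+        A minimal usage sketch (illustrative only; the metadata names and values
+        shown are assumptions):
+
+        .. code-block:: python
+
+            # Each call replaces all metadata currently attached to the blob.
+            await blob_client.set_blob_metadata({"category": "reports", "owner": "data-team"})
+
+            # Calling with no metadata removes all existing metadata.
+            await blob_client.set_blob_metadata()
+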
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Etag and last modified)
+        """
+        options = self._set_blob_metadata_options(metadata=metadata, **kwargs)
+        try:
+            return await self._client.blob.set_metadata(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_immutability_policy(self, immutability_policy, **kwargs):
+        # type: (ImmutabilityPolicy, **Any) -> Dict[str, str]
+        """The Set Immutability Policy operation sets the immutability policy on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :param ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Key value pairs of the response headers.
+        :rtype: Dict[str, str]
+        """
+
+        kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
+        kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
+        return await self._client.blob.set_immutability_policy(cls=return_response_headers, **kwargs)
+
+    @distributed_trace_async()
+    async def delete_immutability_policy(self, **kwargs):
+        # type: (**Any) -> None
+        """The Delete Immutability Policy operation deletes the immutability policy on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+        """
+
+        await self._client.blob.delete_immutability_policy(**kwargs)
+
+    @distributed_trace_async
+    async def set_legal_hold(self, legal_hold, **kwargs):
+        # type: (bool, **Any) -> Dict[str, Union[str, datetime, bool]]
+        """The Set Legal Hold operation sets a legal hold on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :param bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Key value pairs of the response headers (including the legal hold status).
+        :rtype: Dict[str, Union[str, datetime, bool]]
+        """
+
+        return await self._client.blob.set_legal_hold(legal_hold, cls=return_response_headers, **kwargs)
+
+    @distributed_trace_async
+    async def create_page_blob(  # type: ignore
+            self, size,  # type: int
+            content_settings=None,  # type: Optional[ContentSettings]
+            metadata=None,  # type: Optional[Dict[str, str]]
+            premium_page_blob_tier=None,  # type: Optional[Union[str, PremiumPageBlobTier]]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Creates a new Page Blob of the specified size.
+
+        :param int size:
+            This specifies the maximum size for the page blob, up to 1 TB.
+            The page blob size must be aligned to a 512-byte boundary.
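+            For example, a caller sizing the blob for arbitrary content would round
+            up to the next 512-byte boundary first (a sketch; ``content_length`` is
+            an assumed variable):
+
+            .. code-block:: python
+
+                PAGE_ALIGNMENT = 512
+                # Ceiling-divide to the next 512-byte boundary.
+                aligned_size = -(-content_length // PAGE_ALIGNMENT) * PAGE_ALIGNMENT
+                await blob_client.create_page_blob(size=aligned_size)
+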
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword tags:
+            Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword int sequence_number:
+            Only for Page blobs. The sequence number is a user-controlled value that you can use to
+            track requests. The value of the sequence number must be between 0
+            and 2^63 - 1. The default value is 0.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name.
If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_page_blob_options( + size, + content_settings=content_settings, + metadata=metadata, + premium_page_blob_tier=premium_page_blob_tier, + **kwargs) + try: + return await self._client.page_blob.create(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def create_append_blob(self, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Creates a new Append Blob. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. 
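+        A minimal usage sketch (illustrative only; the payloads are assumptions):
+
+        .. code-block:: python
+
+            # Create an empty append blob, then append records to its end.
+            await blob_client.create_append_blob()
+            await blob_client.append_block(b"first log line")
+            await blob_client.append_block(b"second log line")
+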
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_append_blob_options( + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return await self._client.append_blob.create(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def create_snapshot(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Creates a snapshot of the blob. + + A snapshot is a read-only version of a blob that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a blob as it appears at a moment in time. + + A snapshot of a blob has the same name as the base blob from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. 
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START create_blob_snapshot]
+                :end-before: [END create_blob_snapshot]
+                :language: python
+                :dedent: 12
+                :caption: Create a snapshot of the blob.
+        """
+        options = self._create_snapshot_options(metadata=metadata, **kwargs)
+        try:
+            return await self._client.blob.create_snapshot(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs):
+        # type: (str, Optional[Dict[str, str]], bool, Any) -> Any
+        """Copies a blob asynchronously.
+
+        This operation returns a dictionary of copy properties, including a copy ID
+        that can be used to check the status of, or abort, the copy operation.
+        The Blob service copies blobs on a best-effort basis.
+
+        The source blob for a copy operation may be a block blob, an append blob,
+        or a page blob. If the destination blob already exists, it must be of the
+        same blob type as the source blob. Any existing destination blob will be
+        overwritten. The destination blob cannot be modified while a copy operation
+        is in progress.
+
+        When copying from a page blob, the Blob service creates a destination page
+        blob of the source blob's length, initially containing all zeroes. Then
+        the source page ranges are enumerated, and non-empty ranges are copied.
+
+        For a block blob or an append blob, the Blob service creates a committed
+        blob of zero length before returning from this operation. When copying
+        from a block blob, all committed blocks and their block IDs are copied.
+        Uncommitted blocks are not copied. At the end of the copy operation, the
+        destination blob will have the same committed block count as the source.
+
+        When copying from an append blob, all committed blocks are copied. At the
+        end of the copy operation, the destination blob will have the same committed
+        block count as the source.
+
+        For all blob types, you can check the progress of a pending copy by polling
+        :func:`get_blob_properties` on the destination, and abort it with
+        :func:`abort_copy` using the returned copy ID. The final blob will be
+        committed when the copy completes.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies a file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=
+
+            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+        :param metadata:
+            Name-value pairs associated with the blob as metadata. If no name-value
+            pairs are specified, the operation will copy the metadata from the
+            source blob or file to the destination blob. If one or more name-value
+            pairs are specified, the destination blob is created with the specified
+            metadata, and metadata is not copied from the source blob or file.
+        :type metadata: dict(str, str)
+        :param bool incremental_copy:
+            Copies the snapshot of the source page blob to a destination page blob.
+            The snapshot is copied such that only the differential changes between
+            the previously copied snapshot and the current snapshot are transferred
+            to the destination.
+            The copied snapshots are complete copies of the original snapshot and
+            can be read or copied from as usual. Defaults to False.
+        :keyword tags:
+            Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source
+            blob has been modified since the specified date/time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source blob
+            has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only
+            if the destination blob has been modified since the specified date/time.
+            If the destination blob has not been modified, the Blob service returns
+            status code 412 (Precondition Failed).
+ :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has not been modified since the specified + date/time. If the destination blob has been modified, the Blob service + returns status code 412 (Precondition Failed). + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword source_lease: + Specify this to perform the Copy Blob operation only if + the lease ID given matches the active lease ID of the source blob. + :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword bool seal_destination_blob: + Seal the destination append blob. This operation is only for append blob. + + .. versionadded:: 12.4.0 + + :keyword bool requires_sync: + Enforces that the service will not return a response until the copy is complete. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. This option is only available when `incremental_copy` is + set to False and `requires_sync` is set to True. + :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). + :rtype: dict[str, Union[str, ~datetime.datetime]] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START copy_blob_from_url] + :end-before: [END copy_blob_from_url] + :language: python + :dedent: 16 + :caption: Copy a blob from a URL. 
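+        A minimal status-checking sketch (illustrative only; ``source_url`` is
+        assumed to be readable by the service):
+
+        .. code-block:: python
+
+            copy = await blob_client.start_copy_from_url(source_url)
+            props = await blob_client.get_blob_properties()
+            if props.copy.status == "pending":
+                # A pending (asynchronous) copy can be cancelled via its copy ID.
+                await blob_client.abort_copy(copy["copy_id"])
+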
+ """ + options = self._start_copy_from_url_options( + source_url=self._encode_source_url(source_url), + metadata=metadata, + incremental_copy=incremental_copy, + **kwargs) + try: + if incremental_copy: + return await self._client.page_blob.copy_incremental(**options) + return await self._client.blob.start_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination blob with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID, or an + instance of BlobProperties. + :type copy_id: str or ~azure.storage.blob.BlobProperties + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START abort_copy_blob_from_url] + :end-before: [END abort_copy_blob_from_url] + :language: python + :dedent: 16 + :caption: Abort copying a blob from URL. + """ + options = self._abort_copy_options(copy_id, **kwargs) + try: + await self._client.blob.abort_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): + # type: (int, Optional[str], Any) -> BlobLeaseClient + """Requests a new lease. + + If the blob does not have an active lease, the Blob + Service creates a lease on the blob and returns a new lease. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A BlobLeaseClient object. 
+ :rtype: ~azure.storage.blob.aio.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START acquire_lease_on_blob] + :end-before: [END acquire_lease_on_blob] + :language: python + :dedent: 12 + :caption: Acquiring a lease on a blob. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + await lease.acquire(lease_duration=lease_duration, **kwargs) + return lease + + @distributed_trace_async + async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): + # type: (Union[str, StandardBlobTier], Any) -> None + """This operation sets the tier on a block blob. + + A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param standard_blob_tier: + Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', + 'Archive'. The hot tier is optimized for storing data that is accessed + frequently. The cool storage tier is optimized for storing data that + is infrequently accessed and stored for at least a month. The archive + tier is optimized for storing data that is rarely accessed and stored + for at least six months with flexible latency requirements. + :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if standard_blob_tier is None: + raise ValueError("A StandardBlobTier must be specified") + try: + await self._client.blob.set_tier( + tier=standard_blob_tier, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def stage_block( + self, block_id, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> None + """Creates a new block to be committed as part of a blob. + + :param str block_id: A string value that identifies the block. + The string should be less than or equal to 64 bytes in size. + For a given blob, the block_id must be the same size for each block. + :param data: The blob data. + :param int length: Size of the block. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. 
Also note that if enabled, the memory-efficient upload algorithm
+ will not be used because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
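+
+ .. admonition:: Example:
+
+ A minimal, illustrative sketch (not from the package's sample files); it assumes
+ an authenticated asynchronous ``BlobClient`` named ``blob``:
+
+ .. code-block:: python
+
+ import uuid
+ # Stage one block; the ID is arbitrary (a UUID string here), but all
+ # block IDs of a given blob must be the same size.
+ block_id = str(uuid.uuid4())
+ await blob.stage_block(block_id=block_id, data=b"block data")
+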
+ """
+ options = self._stage_block_options(
+ block_id,
+ data,
+ length=length,
+ **kwargs)
+ try:
+ return await self._client.block_blob.stage_block(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def stage_block_from_url(
+ self, block_id, # type: Union[str, int]
+ source_url, # type: str
+ source_offset=None, # type: Optional[int]
+ source_length=None, # type: Optional[int]
+ source_content_md5=None, # type: Optional[Union[bytes, bytearray]]
+ **kwargs
+ ):
+ # type: (...) -> None
+ """Creates a new block to be committed as part of a blob where
+ the contents are read from a URL.
+
+ :param str block_id: A string value that identifies the block.
+ The string should be less than or equal to 64 bytes in size.
+ For a given blob, the block_id must be the same size for each block.
+ :param str source_url:
+ The URL of the source data. It can point to any Azure Blob or File that is
+ either public or has a shared access signature attached.
+ :param int source_offset:
+ Start of byte range to use for the block.
+ Must be set if source length is provided.
+ :param int source_length: The size of the block in bytes.
+ :param bytearray source_content_md5:
+ Specify the md5 calculated for the range of
+ bytes that must be read from the copy source.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword str source_authorization:
+ Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+ the prefix of the source_authorization string.
+ :rtype: None
+ """
+ options = self._stage_block_from_url_options(
+ block_id,
+ source_url=self._encode_source_url(source_url),
+ source_offset=source_offset,
+ source_length=source_length,
+ source_content_md5=source_content_md5,
+ **kwargs)
+ try:
+ return await self._client.block_blob.stage_block_from_url(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_block_list(self, block_list_type="committed", **kwargs):
+ # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]]
+ """The Get Block List operation retrieves the list of blocks that have
+ been uploaded as part of a block blob.
+
+ :param str block_list_type:
+ Specifies whether to return the list of committed
+ blocks, the list of uncommitted blocks, or both lists together.
+ Possible values include: 'committed', 'uncommitted', 'all'
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A tuple of two lists - committed and uncommitted blocks
+ :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock))
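+
+ .. admonition:: Example:
+
+ A minimal, illustrative sketch (not from the package's sample files); it assumes
+ an authenticated asynchronous ``BlobClient`` named ``blob``:
+
+ .. code-block:: python
+
+ # Retrieve both committed and uncommitted blocks.
+ committed, uncommitted = await blob.get_block_list("all")
+ print(len(committed), len(uncommitted))
+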
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ try:
+ blocks = await self._client.block_blob.get_block_list(
+ list_type=block_list_type,
+ snapshot=self.snapshot,
+ timeout=kwargs.pop('timeout', None),
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ return self._get_block_list_result(blocks)
+
+ @distributed_trace_async
+ async def commit_block_list( # type: ignore
+ self, block_list, # type: List[BlobBlock]
+ content_settings=None, # type: Optional[ContentSettings]
+ metadata=None, # type: Optional[Dict[str, str]]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """The Commit Block List operation writes a blob by specifying the list of
+ block IDs that make up the blob.
+
+ :param list block_list:
+ List of BlobBlock objects.
+ :param ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict[str, str]
+ :keyword tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+ .. versionadded:: 12.4.0
+
+ :paramtype tags: dict(str, str)
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+ Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+ .. versionadded:: 12.10.0
+ This was introduced in API version '2020-10-02'.
+
+ :keyword bool legal_hold:
+ Specifies whether a legal hold should be set on the blob.
+
+ .. versionadded:: 12.10.0
+ This was introduced in API version '2020-10-02'.
+
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the block content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+ A standard blob tier value to set the blob to. For this version of the library,
+ this is only applicable to block blobs on standard storage accounts.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
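+
+ .. admonition:: Example:
+
+ A minimal, illustrative sketch (not from the package's sample files); it assumes
+ an authenticated asynchronous ``BlobClient`` named ``blob`` and a list of
+ previously staged block IDs named ``staged_ids``:
+
+ .. code-block:: python
+
+ from azure.storage.blob import BlobBlock
+ block_list = [BlobBlock(block_id=bid) for bid in staged_ids]
+ await blob.commit_block_list(block_list)
+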
+ """
+ options = self._commit_block_list_options(
+ block_list,
+ content_settings=content_settings,
+ metadata=metadata,
+ **kwargs)
+ try:
+ return await self._client.block_blob.commit_block_list(**options) # type: ignore
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs):
+ # type: (Union[str, PremiumPageBlobTier], **Any) -> None
+ """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
+
+ :param premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :rtype: None
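+
+ .. admonition:: Example:
+
+ A minimal, illustrative sketch (not from the package's sample files); it assumes
+ an authenticated asynchronous ``BlobClient`` named ``blob`` pointing at a page
+ blob on a premium storage account:
+
+ .. code-block:: python
+
+ from azure.storage.blob import PremiumPageBlobTier
+ await blob.set_premium_page_blob_tier(PremiumPageBlobTier.P10)
+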
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_modify_conditions(kwargs)
+ if premium_page_blob_tier is None:
+ raise ValueError("A PremiumPageBlobTier must be specified")
+ try:
+ await self._client.blob.set_tier(
+ tier=premium_page_blob_tier,
+ timeout=kwargs.pop('timeout', None),
+ lease_access_conditions=access_conditions,
+ modified_access_conditions=mod_conditions,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def set_blob_tags(self, tags=None, **kwargs):
+ # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot.
+ Each call to this operation replaces all existing tags attached to the blob. To remove all
+ tags from the blob, call this operation with no tags set.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :param tags:
+ Name-value pairs associated with the blob as tag. Tags are case-sensitive.
+ The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+ and tag values must be between 0 and 256 characters.
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+ space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+ :type tags: dict(str, str)
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to set tags on.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the tags content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent. This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https, as https (the default),
+ will already validate. Note that this MD5 hash is not stored with the
+ blob.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
+ """
+ options = self._set_blob_tags_options(tags=tags, **kwargs)
+ try:
+ return await self._client.blob.set_tags(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_blob_tags(self, **kwargs):
+ # type: (**Any) -> Dict[str, str]
+ """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2019-12-12'.
+
+ :keyword str version_id:
+ The version id parameter is an opaque DateTime
+ value that, when present, specifies the version of the blob to get tags from.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Key value pairs of blob tags.
+ :rtype: Dict[str, str]
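+
+ .. admonition:: Example:
+
+ A minimal, illustrative sketch (not from the package's sample files); it assumes
+ an authenticated asynchronous ``BlobClient`` named ``blob``:
+
+ .. code-block:: python
+
+ tags = await blob.get_blob_tags()
+ for key, value in tags.items():
+     print(key, value)
+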
+ """
+ options = self._get_blob_tags_options(**kwargs)
+ try:
+ _, tags = await self._client.blob.get_tags(**options)
+ return parse_tags(tags) # pylint: disable=protected-access
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def get_page_ranges( # type: ignore
+ self, offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a Page Blob or snapshot
+ of a page blob.
+
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param str previous_snapshot_diff:
+ The snapshot diff parameter that contains an opaque DateTime value that
+ specifies a previous blob snapshot to be compared
+ against a more recent snapshot or the current blob.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges, the second element is the
+ list of cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
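+
+ .. admonition:: Example:
+
+ A minimal, illustrative sketch (not from the package's sample files); it assumes
+ an authenticated asynchronous ``BlobClient`` named ``blob`` pointing at a page blob:
+
+ .. code-block:: python
+
+ page_ranges, clear_ranges = await blob.get_page_ranges()
+ for r in page_ranges:
+     print(r['start'], r['end'])
+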
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ previous_snapshot_diff=previous_snapshot_diff,
+ **kwargs)
+ try:
+ if previous_snapshot_diff:
+ ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+ else:
+ ranges = await self._client.page_blob.get_page_ranges(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
+
+ @distributed_trace_async
+ async def get_page_range_diff_for_managed_disk(
+ self, previous_snapshot_url, # type: str
+ offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ """Returns the list of valid page ranges for a managed disk or snapshot.
+
+ .. note::
+ This operation is only available for managed disk accounts.
+
+ .. versionadded:: 12.2.0
+ This operation was introduced in API version '2019-07-07'.
+
+ :param str previous_snapshot_url:
+ Specifies the URL of a previous snapshot of the managed disk.
+ The response will only contain pages that were changed between the target blob and
+ its previous snapshot.
+ :param int offset:
+ Start of byte range to use for getting valid page ranges.
+ If no length is given, all bytes after the offset will be searched.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for getting valid page ranges.
+ If length is given, offset must be provided.
+ This range will return valid page ranges from the offset start up to
+ the specified length.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns:
+ A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+ The first element is the list of filled page ranges, the second element is the
+ list of cleared page ranges.
+ :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+ """
+ options = self._get_page_ranges_options(
+ offset=offset,
+ length=length,
+ prev_snapshot_url=previous_snapshot_url,
+ **kwargs)
+ try:
+ ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ return get_page_ranges_result(ranges)
+
+ @distributed_trace_async
+ async def set_sequence_number( # type: ignore
+ self, sequence_number_action, # type: Union[str, SequenceNumberAction]
+ sequence_number=None, # type: Optional[str]
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets the blob sequence number.
+
+ :param str sequence_number_action:
+ This property indicates how the service should modify the blob's sequence
+ number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
+ :param str sequence_number:
+ This property sets the blob's sequence number. The sequence number is a
+ user-controlled property that you can use to track requests and manage
+ concurrency issues.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._set_sequence_number_options( + sequence_number_action, sequence_number=sequence_number, **kwargs) + try: + return await self._client.page_blob.update_sequence_number(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def resize_blob(self, size, **kwargs): + # type: (int, Any) -> Dict[str, Union[str, datetime]] + """Resizes a page blob to the specified size. + + If the specified value is less than the current size of the blob, + then all pages above the specified value are cleared. + + :param int size: + Size used to resize blob. Maximum size for a page blob is up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._resize_blob_options(size, **kwargs) + try: + return await self._client.page_blob.resize(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_page( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) 
-> Dict[str, Union[str, datetime]] + """The Upload Pages operation writes a range of pages to a page blob. + + :param bytes page: + Content of the page. + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. 
If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
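+
+ .. admonition:: Example:
+
+ A minimal, illustrative sketch (not from the package's sample files); it assumes
+ an authenticated asynchronous ``BlobClient`` named ``blob`` pointing at a page blob:
+
+ .. code-block:: python
+
+ # Write a single 512-byte-aligned page of zeros at the start of the blob.
+ data = bytes(512)
+ await blob.upload_page(data, offset=0, length=512)
+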
+ """
+ options = self._upload_page_options(
+ page=page,
+ offset=offset,
+ length=length,
+ **kwargs)
+ try:
+ return await self._client.page_blob.upload_pages(**options) # type: ignore
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def upload_pages_from_url(self, source_url, # type: str
+ offset, # type: int
+ length, # type: int
+ source_offset, # type: int
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ """
+ The Upload Pages operation writes a range of pages to a page blob where
+ the contents are read from a URL.
+
+ :param str source_url:
+ The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+ shared access signature attached.
+ :param int offset:
+ Start of byte range to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int source_offset:
+ This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+ The service will read the same number of bytes as the destination range (length-offset).
+ :keyword bytes source_content_md5:
+ If given, the service will calculate the MD5 hash of the page content and compare against this value.
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the source resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the source resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword int if_sequence_number_lte:
+ If the blob's sequence number is less than or equal to
+ the specified value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_lt:
+ If the blob's sequence number is less than the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword int if_sequence_number_eq:
+ If the blob's sequence number is equal to the specified
+ value, the request proceeds; otherwise it fails.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The destination match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword str source_authorization:
+ Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+ the prefix of the source_authorization string.
+ :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
+ """
+
+ options = self._upload_pages_from_url_options(
+ source_url=self._encode_source_url(source_url),
+ offset=offset,
+ length=length,
+ source_offset=source_offset,
+ **kwargs
+ )
+ try:
+ return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def clear_page(self, offset, length, **kwargs):
+ # type: (int, int, Any) -> Dict[str, Union[str, datetime]]
+ """Clears a range of pages.
+
+ :param int offset:
+ Start of byte range to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :param int length:
+ Number of bytes to use for writing to a section of the blob.
+ Pages must be aligned with 512-byte boundaries, the start offset
+ must be a modulus of 512 and the length must be a modulus of
+ 512.
+ :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._clear_page_options(offset, length, **kwargs) + try: + return await self._client.page_blob.clear_pages(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def append_block( # type: ignore + self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """Commits a new block of data to the end of the existing append blob. + + :param data: + Content of the block. + :param int length: + Size of the block in bytes. + :keyword bool validate_content: + If true, calculates an MD5 hash of the block content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. 
If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). + :rtype: dict(str, Any) + """ + options = self._append_block_options( + data, + length=length, + **kwargs + ) + try: + return await self._client.append_blob.append_block(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async() + async def append_block_from_url(self, copy_source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + **kwargs): + # type: (...) 
-> Dict[str, Union[str, datetime, int]]
+ """
+ Creates a new block to be committed as part of a blob, where the contents are read from a source url.
+
+ :param str copy_source_url:
+ The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+ shared access signature attached.
+ :param int source_offset:
+ This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+ :param int source_length:
+ This indicates the end of the range of bytes that has to be taken from the copy source.
+ :keyword bytearray source_content_md5:
+ If given, the service will calculate the MD5 hash of the block content and compare against this value.
+ :keyword int maxsize_condition:
+ Optional conditional header. The max length in bytes permitted for
+ the append blob. If the Append Block operation would cause the blob
+ to exceed that limit or if the blob size is already greater than the
+ value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error
+ (HTTP status code 412 - Precondition Failed).
+ :keyword int appendpos_condition:
+ Optional conditional header, used only for the Append Block operation.
+ A number indicating the byte offset to compare. Append Block will
+ succeed only if the append position is equal to this number. If it
+ is not, the request will fail with the
+ AppendPositionConditionNotMet error
+ (HTTP status code 412 - Precondition Failed).
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The destination match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+ eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~datetime.datetime source_if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the source resource has been modified since the specified time.
+ :keyword ~datetime.datetime source_if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the source resource has not been modified since the specified date/time.
+ :keyword str source_etag:
+ The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions source_match_condition:
+ The source match condition to use upon the etag.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope:
+ A predefined encryption scope used to encrypt the data on the service. An encryption
+ scope can be created using the Management API and referenced here by name. If a default
+ encryption scope has been defined at the container, this value will override it if the
+ container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+ .. versionadded:: 12.2.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword str source_authorization:
+ Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+ the prefix of the source_authorization string.
+ :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+ :rtype: dict(str, Any)
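+
+ .. admonition:: Example:
+
+ A minimal, illustrative sketch (not from the package's sample files); it assumes
+ an authenticated asynchronous ``BlobClient`` named ``blob`` pointing at an append
+ blob, and a source URL ``src_url`` that is readable (public or carrying a SAS):
+
+ .. code-block:: python
+
+ await blob.append_block_from_url(src_url, source_offset=0, source_length=512)
+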
+ """
+ options = self._append_block_from_url_options(
+ copy_source_url=self._encode_source_url(copy_source_url),
+ source_offset=source_offset,
+ source_length=source_length,
+ **kwargs
+ )
+ try:
+ return await self._client.append_blob.append_block_from_url(**options) # type: ignore
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async()
+ async def seal_append_blob(self, **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime, int]]
+ """The Seal operation seals the Append Blob to make it read-only.
+
+ .. versionadded:: 12.4.0
+
+ :keyword int appendpos_condition:
+ Optional conditional header, used only for the Append Block operation.
+ A number indicating the byte offset to compare. Append Block will
+ succeed only if the append position is equal to this number. If it
+ is not, the request will fail with the AppendPositionConditionNotMet error
+ (HTTP status code 412 - Precondition Failed).
+ :keyword lease:
+ Required if the blob has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). + :rtype: dict(str, Any) + """ + options = self._seal_append_blob_options(**kwargs) + try: + return await self._client.append_blob.seal(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _get_container_client(self): # pylint: disable=client-method-missing-kwargs + # type: (...) -> ContainerClient + """Get a client to interact with the blob's parent container. + + The container need not already exist. Defaults to current blob's credentials. + + :returns: A ContainerClient. + :rtype: ~azure.storage.blob.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_container_client_from_blob_client] + :end-before: [END get_container_client_from_blob_client] + :language: python + :dedent: 12 + :caption: Get container client from blob object. + """ + from ._container_client_async import ContainerClient + if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + else: + _pipeline = self._pipeline # pylint: disable = protected-access + return ContainerClient( + "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name, + credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, + _pipeline=_pipeline, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_service_client_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_service_client_async.py new file mode 100644 index 0000000..9cb1563 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_blob_service_client_async.py @@ -0,0 +1,676 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+import functools
+import warnings
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, Iterable, Dict, List,
+ TYPE_CHECKING
+)
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator_async import distributed_trace_async
+from azure.core.async_paging import AsyncItemPaged
+
+from .._shared.models import LocationMode
+from .._shared.policies_async import ExponentialRetry
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
+from .._shared.response_handlers import return_response_headers, process_storage_error
+from .._shared.parser import _to_utc_datetime
+from .._shared.response_handlers import parse_to_internal_user_delegation_key
+from .._generated.aio import AzureBlobStorage
+from .._generated.models import StorageServiceProperties, KeyInfo
+from .._blob_service_client import BlobServiceClient as BlobServiceClientBase
+from ._container_client_async import ContainerClient
+from ._blob_client_async import BlobClient
+from .._models import ContainerProperties
+from .._deserialize import service_stats_deserialize, service_properties_deserialize
+from .._serialize import get_api_version
+from ._models import ContainerPropertiesPaged, FilteredBlobPaged
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ from .._shared.models import AccountSasPermissions, ResourceTypes, UserDelegationKey
+ from ._lease_async import BlobLeaseClient
+ from .._models import (
+ BlobProperties,
+ PublicAccess,
+ BlobAnalyticsLogging,
+ Metrics,
+ CorsRule,
+ RetentionPolicy,
+ StaticWebsite,
+ )
+
+
+class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase):
+ """A client to interact with the Blob Service at the account level.
+
+ This client provides operations to retrieve and configure the account properties
+ as well as list, create and delete containers within the account.
+ For operations relating to a specific container or blob, clients for those entities
+ can also be retrieved using the `get_client` functions.
+
+ :param str account_url:
+ The URL to the blob storage account. Any other entities included
+ in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
+ authenticated with a SAS token.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+ an instance of an AzureSasCredential from azure.core.credentials, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+ :keyword str api_version:
+ The Storage API version to use for requests. Default value is the most recent
+ service version that is compatible with the current SDK.
+ Setting to an older version may result in reduced feature compatibility.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str secondary_hostname:
+ The hostname of the secondary endpoint.
+ :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+ Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+        uploaded with only one HTTP PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory-efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        any part exceeding this size will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_authentication_async.py
+            :start-after: [START create_blob_service_client]
+            :end-before: [END create_blob_service_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobServiceClient with account url and credential.
+
+        .. literalinclude:: ../samples/blob_samples_authentication_async.py
+            :start-after: [START create_blob_service_client_oauth]
+            :end-before: [END create_blob_service_client_oauth]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobServiceClient with Azure Identity credentials.
+    """
+
+    def __init__(
+            self, account_url,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> None
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        super(BlobServiceClient, self).__init__(
+            account_url,
+            credential=credential,
+            **kwargs)
+        self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # pylint: disable=protected-access
+
+    @distributed_trace_async
+    async def get_user_delegation_key(self, key_start_time,  # type: datetime
+                                      key_expiry_time,  # type: datetime
+                                      **kwargs  # type: Any
+                                      ):
+        # type: (...) -> UserDelegationKey
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: The user delegation key.
+        :rtype: ~azure.storage.blob.UserDelegationKey
+        """
+        key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info,
+                                                                                     timeout=timeout,
+                                                                                     **kwargs)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        return parse_to_internal_user_delegation_key(user_delegation_key)  # type: ignore
+
+    @distributed_trace_async
+    async def get_account_information(self, **kwargs):
+        # type: (Any) -> Dict[str, str]
+        """Gets information related to the storage account.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START get_blob_service_account_info]
+                :end-before: [END get_blob_service_account_info]
+                :language: python
+                :dedent: 12
+                :caption: Getting account information for the blob service.
+        """
+        try:
+            return await self._client.service.get_account_info(cls=return_response_headers, **kwargs)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_service_stats(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Retrieves statistics related to replication for the Blob service.
+
+        It is only available when read-access geo-redundant replication is enabled for
+        the storage account.
+
+        With geo-redundant replication, Azure Storage keeps your data durable
+        in two locations. In both locations, Azure Storage constantly maintains
+        multiple healthy replicas of your data. The location where you read,
+        create, update, or delete data is the primary storage account location.
+        The primary location exists in the region you choose at the time you
+        create an account via the Azure portal, for
+        example, North Central US. The location to which your data is replicated
+        is the secondary location. The secondary location is automatically
+        determined based on the location of the primary; it is in a second data
+        center that resides in the same region as the primary location. Read-only
+        access is available from the secondary location if read-access geo-redundant
+        replication is enabled for your storage account.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: The blob service stats.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START get_blob_service_stats]
+                :end-before: [END get_blob_service_stats]
+                :language: python
+                :dedent: 12
+                :caption: Getting service stats for the blob service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = await self._client.service.get_statistics(  # type: ignore
+                timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
+            return service_stats_deserialize(stats)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_service_properties(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An object containing blob service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START get_blob_service_properties]
+                :end-before: [END get_blob_service_properties]
+                :language: python
+                :dedent: 12
+                :caption: Getting service properties for the blob service.
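+
+        A minimal inline usage sketch (the account URL, credential, and variable
+        names below are illustrative placeholders, not values defined in this
+        module):
+
+        .. code-block:: python
+
+            import asyncio
+            from azure.identity.aio import DefaultAzureCredential
+
+            async def main():
+                # Hypothetical account URL; substitute your own.
+                service = BlobServiceClient(
+                    "https://myaccount.blob.core.windows.net",
+                    credential=DefaultAzureCredential())
+                async with service:
+                    props = await service.get_service_properties()
+                    # The result is a plain dict of service settings.
+                    print(sorted(props.keys()))
+
+            asyncio.run(main())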
+ """ + timeout = kwargs.pop('timeout', None) + try: + service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_service_properties( + self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] + hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + target_version=None, # type: Optional[str] + delete_retention_policy=None, # type: Optional[RetentionPolicy] + static_website=None, # type: Optional[StaticWebsite] + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's Blob service, including + Azure Storage Analytics. + + If an element (e.g. analytics_logging) is left as None, the + existing settings on the service for that functionality are preserved. + + :param analytics_logging: + Groups the Azure Analytics Logging settings. + :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for blobs. + :type hour_metrics: ~azure.storage.blob.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for blobs. + :type minute_metrics: ~azure.storage.blob.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list[~azure.storage.blob.CorsRule] + :param str target_version: + Indicates the default version to use for requests if an incoming + request's version is not specified. + :param delete_retention_policy: + The delete retention policy specifies whether to retain deleted blobs. + It also specifies the number of days and versions of blob to keep. + :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy + :param static_website: + Specifies whether the static website feature is enabled, + and if yes, indicates the index document and 404 error document to use. + :type static_website: ~azure.storage.blob.StaticWebsite + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START set_blob_service_properties] + :end-before: [END set_blob_service_properties] + :language: python + :dedent: 12 + :caption: Setting service properties for the blob service. 
+ """ + if all(parameter is None for parameter in [ + analytics_logging, hour_metrics, minute_metrics, cors, + target_version, delete_retention_policy, static_website]): + raise ValueError("set_service_properties should be called with at least one parameter") + + props = StorageServiceProperties( + logging=analytics_logging, + hour_metrics=hour_metrics, + minute_metrics=minute_metrics, + cors=cors, + default_service_version=target_version, + delete_retention_policy=delete_retention_policy, + static_website=static_website + ) + timeout = kwargs.pop('timeout', None) + try: + await self._client.service.set_properties(props, timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_containers( + self, name_starts_with=None, # type: Optional[str] + include_metadata=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> AsyncItemPaged[ContainerProperties] + """Returns a generator to list the containers under the specified account. + + The generator will lazily follow the continuation tokens returned by + the service and stop when all containers have been returned. + + :param str name_starts_with: + Filters the results to return only containers whose names + begin with the specified prefix. + :param bool include_metadata: + Specifies that container metadata to be returned in the response. + The default value is `False`. + :keyword bool include_deleted: + Specifies that deleted containers to be returned in the response. This is for container restore enabled + account. The default value is `False`. + .. versionadded:: 12.4.0 + :keyword int results_per_page: + The maximum number of container names to retrieve per API + call. If the request does not specify the server will return up to 5,000 items. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) of ContainerProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_list_containers] + :end-before: [END bsc_list_containers] + :language: python + :dedent: 16 + :caption: Listing the containers in the blob service. + """ + include = ['metadata'] if include_metadata else [] + include_deleted = kwargs.pop('include_deleted', None) + if include_deleted: + include.append("deleted") + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.service.list_containers_segment, + prefix=name_starts_with, + include=include, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=ContainerPropertiesPaged + ) + + @distributed_trace + def find_blobs_by_tags(self, filter_expression, **kwargs): + # type: (str, **Any) -> AsyncItemPaged[FilteredBlob] + """The Filter Blobs operation enables callers to list blobs across all + containers whose tags match a given search expression. Filter blobs + searches across all containers within a storage account but can be + scoped within the expression to a single container. + + :param str filter_expression: + The expression to find blobs whose tags matches the specified condition. + eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" + To specify a container, eg. 
"@container='containerName' and \"Name\"='C'" + :keyword int results_per_page: + The max result per page when paginating. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob] + """ + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.service.filter_blobs, + where=filter_expression, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=FilteredBlobPaged) + + @distributed_trace_async + async def create_container( + self, name, # type: str + metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[Union[PublicAccess, str]] + **kwargs + ): + # type: (...) -> ContainerClient + """Creates a new container under the specified account. + + If the container with the same name already exists, a ResourceExistsError will + be raised. This method returns a client with which to interact with the newly + created container. + + :param str name: The name of the container to create. + :param metadata: + A dict with name-value pairs to associate with the + container as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + Possible values include: 'container', 'blob'. + :type public_access: str or ~azure.storage.blob.PublicAccess + :keyword container_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + + .. versionadded:: 12.2.0 + + :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.blob.aio.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_create_container] + :end-before: [END bsc_create_container] + :language: python + :dedent: 16 + :caption: Creating a container in the blob service. + """ + container = self.get_container_client(name) + timeout = kwargs.pop('timeout', None) + kwargs.setdefault('merge_span', True) + await container.create_container( + metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) + return container + + @distributed_trace_async + async def delete_container( + self, container, # type: Union[ContainerProperties, str] + lease=None, # type: Optional[Union[BlobLeaseClient, str]] + **kwargs + ): + # type: (...) -> None + """Marks the specified container for deletion. + + The container and any blobs contained within it are later deleted during garbage collection. + If the container is not found, a ResourceNotFoundError will be raised. + + :param container: + The container to delete. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. + Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START bsc_delete_container]
+                :end-before: [END bsc_delete_container]
+                :language: python
+                :dedent: 16
+                :caption: Deleting a container in the blob service.
+        """
+        container = self.get_container_client(container)  # type: ignore
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        await container.delete_container(  # type: ignore
+            lease=lease,
+            timeout=timeout,
+            **kwargs)
+
+    @distributed_trace_async
+    async def _rename_container(self, name, new_name, **kwargs):
+        # type: (str, str, **Any) -> ContainerClient
+        """Renames a container.
+
+        The operation is successful only if the source container exists.
+
+        :param str name:
+            The name of the container to rename.
+        :param str new_name:
+            The new name for the container.
+        :keyword lease:
+            Specify this to perform the operation only if the lease ID given
+            matches the active lease ID of the source container.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        renamed_container = self.get_container_client(new_name)
+        lease = kwargs.pop('lease', None)
+        try:
+            kwargs['source_lease_id'] = lease.id  # type: str
+        except AttributeError:
+            kwargs['source_lease_id'] = lease
+        try:
+            await renamed_container._client.container.rename(name, **kwargs)  # pylint: disable = protected-access
+            return renamed_container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs):
+        # type: (str, str, **Any) -> ContainerClient
+        """Restores a soft-deleted container.
+
+        The operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_container_name:
+            Specifies the name of the deleted container to restore.
+        :param str deleted_container_version:
+            Specifies the version of the deleted container to restore.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.blob.aio.ContainerClient + """ + new_name = kwargs.pop('new_name', None) + if new_name: + warnings.warn("`new_name` is no longer supported.", DeprecationWarning) + container = self.get_container_client(new_name or deleted_container_name) + try: + await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access + deleted_container_version=deleted_container_version, + timeout=kwargs.pop('timeout', None), **kwargs) + return container + except HttpResponseError as error: + process_storage_error(error) + + def get_container_client(self, container): + # type: (Union[ContainerProperties, str]) -> ContainerClient + """Get a client to interact with the specified container. + + The container need not already exist. + + :param container: + The container. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :returns: A ContainerClient. + :rtype: ~azure.storage.blob.aio.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_get_container_client] + :end-before: [END bsc_get_container_client] + :language: python + :dedent: 12 + :caption: Getting the container client to interact with a specific container. + """ + try: + container_name = container.name + except AttributeError: + container_name = container + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ContainerClient( + self.url, container_name=container_name, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + + def get_blob_client( + self, container, # type: Union[ContainerProperties, str] + blob, # type: Union[BlobProperties, str] + snapshot=None # type: Optional[Union[Dict[str, Any], str]] + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param container: + The container that the blob is in. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param blob: + The blob with which to interact. This can either be the name of the blob, + or an instance of BlobProperties. + :type blob: str or ~azure.storage.blob.BlobProperties + :param snapshot: + The optional blob snapshot on which to operate. This can either be the ID of the snapshot, + or a dictionary output returned by + :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`. + :type snapshot: str or dict(str, Any) + :returns: A BlobClient. + :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_get_blob_client] + :end-before: [END bsc_get_blob_client] + :language: python + :dedent: 16 + :caption: Getting the blob client to interact with a specific blob. 
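+
+        A short usage sketch (the container and blob names here are hypothetical
+        placeholders, and ``service`` is assumed to be an authenticated
+        BlobServiceClient):
+
+        .. code-block:: python
+
+            blob = service.get_blob_client("mycontainer", "myblob.txt")
+            # Download the blob's content; readall() buffers the whole payload.
+            downloader = await blob.download_blob()
+            data = await downloader.readall()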
+ """ + try: + container_name = container.name + except AttributeError: + container_name = container + + try: + blob_name = blob.name + except AttributeError: + blob_name = blob + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( # type: ignore + self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_container_client_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_container_client_async.py new file mode 100644 index 0000000..2f73b9c --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_container_client_async.py @@ -0,0 +1,1209 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, AnyStr, Dict, List, IO, AsyncIterator, + TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError, ResourceNotFoundError +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.async_paging import AsyncItemPaged +from azure.core.pipeline import AsyncPipeline +from azure.core.pipeline.transport import AsyncHttpResponse + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.policies_async import ExponentialRetry +from .._shared.request_handlers import add_metadata_headers, serialize_iso +from .._shared.response_handlers import ( + process_storage_error, + return_response_headers, + return_headers_and_deserialized) +from .._generated.aio import AzureBlobStorage +from .._generated.models import SignedIdentifier +from .._deserialize import deserialize_container_properties +from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions +from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name +from .._models import ContainerProperties, BlobType, BlobProperties # pylint: disable=unused-import +from ._list_blobs_helper import BlobPropertiesPaged, BlobPrefix +from ._lease_async import BlobLeaseClient +from ._blob_client_async import BlobClient + +if TYPE_CHECKING: + from .._models import PublicAccess + from ._download_async import StorageStreamDownloader + from datetime import datetime + from .._models import ( # pylint: disable=unused-import + AccessPolicy, + StandardBlobTier, + PremiumPageBlobTier) + + +class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase): + """A client to interact with a specific container, although that container + may not yet exist. 
+
+    For operations relating to a specific blob within this container, a blob client can be
+    retrieved using the :func:`~get_blob_client` function.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the container,
+        use the :func:`from_container_url` classmethod.
+    :param container_name:
+        The name of the container for the blob.
+    :type container_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential from azure.core.credentials, an account
+        shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is '2019-07-07'.
+        Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+        uploaded with only one HTTP PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory-efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        any part exceeding this size will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_containers_async.py
+            :start-after: [START create_container_client_from_service]
+            :end-before: [END create_container_client_from_service]
+            :language: python
+            :dedent: 8
+            :caption: Get a ContainerClient from an existing BlobServiceClient.
+
+        .. literalinclude:: ../samples/blob_samples_containers_async.py
+            :start-after: [START create_container_client_sasurl]
+            :end-before: [END create_container_client_sasurl]
+            :language: python
+            :dedent: 12
+            :caption: Creating the container client directly.
+    """
+    def __init__(
+            self, account_url,  # type: str
+            container_name,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> None
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        super(ContainerClient, self).__init__(
+            account_url,
+            container_name=container_name,
+            credential=credential,
+            **kwargs)
+        self._client = AzureBlobStorage(url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # pylint: disable=protected-access
+
+    @distributed_trace_async
+    async def create_container(self, metadata=None, public_access=None, **kwargs):
+        # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> None
+        """
+        Creates a new container under the specified account. If the container
+        with the same name already exists, the operation fails.
+
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            container as metadata. Example: {'Category':'test'}
+        :type metadata: dict[str, str]
+        :param ~azure.storage.blob.PublicAccess public_access:
+            Possible values include: 'container', 'blob'.
+        :keyword container_encryption_scope:
+            Specifies the default encryption scope to set on the container and use for
+            all future writes.
+
+            .. versionadded:: 12.2.0
+
+        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START create_container]
+                :end-before: [END create_container]
+                :language: python
+                :dedent: 16
+                :caption: Creating a container to store blobs.
+        """
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))  # type: ignore
+        timeout = kwargs.pop('timeout', None)
+        container_cpk_scope_info = get_container_cpk_scope_info(kwargs)
+        try:
+            return await self._client.container.create(  # type: ignore
+                timeout=timeout,
+                access=public_access,
+                container_cpk_scope_info=container_cpk_scope_info,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def _rename_container(self, new_name, **kwargs):
+        # type: (str, **Any) -> ContainerClient
+        """Renames a container.
+
+        The operation is successful only if the source container exists.
+
+        :param str new_name:
+            The new name for the container.
+        :keyword lease:
+            Specify this to perform the operation only if the lease ID given
+            matches the active lease ID of the source container.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.blob.ContainerClient + """ + lease = kwargs.pop('lease', None) + try: + kwargs['source_lease_id'] = lease.id # type: str + except AttributeError: + kwargs['source_lease_id'] = lease + try: + renamed_container = ContainerClient( + "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + await renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access + return renamed_container + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def delete_container( + self, **kwargs): + # type: (Any) -> None + """ + Marks the specified container for deletion. The container and any blobs + contained within it are later deleted during garbage collection. + + :keyword lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. + Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START delete_container] + :end-before: [END delete_container] + :language: python + :dedent: 16 + :caption: Delete a container. + """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + await self._client.container.delete( + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def acquire_lease( + self, lease_duration=-1, # type: int + lease_id=None, # type: Optional[str] + **kwargs): + # type: (...) -> BlobLeaseClient + """ + Requests a new lease. If the container does not have an active lease, + the Blob service creates a lease on the container and returns a new + lease ID. 
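+
+        For instance (a minimal sketch; ``container_client`` is assumed to be an
+        existing, authenticated ContainerClient):
+
+        .. code-block:: python
+
+            # Acquire a 15-second lease on the container, then release it.
+            lease = await container_client.acquire_lease(lease_duration=15)
+            await lease.release()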
+ + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A BlobLeaseClient object, that can be run in a context manager. + :rtype: ~azure.storage.blob.aio.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START acquire_lease_on_container] + :end-before: [END acquire_lease_on_container] + :language: python + :dedent: 12 + :caption: Acquiring a lease on the container. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) + return lease + + @distributed_trace_async + async def get_account_information(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + """ + try: + return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_container_properties(self, **kwargs): + # type: (**Any) -> ContainerProperties + """Returns all user-defined metadata and system properties for the specified + container. The data returned does not include the container's list of blobs. + + :keyword lease: + If specified, get_container_properties only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Properties for the specified container within a container object. 
+        :rtype: ~azure.storage.blob.ContainerProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START get_container_properties]
+                :end-before: [END get_container_properties]
+                :language: python
+                :dedent: 16
+                :caption: Getting properties on the container.
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response = await self._client.container.get_properties(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                cls=deserialize_container_properties,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        response.name = self.container_name
+        return response  # type: ignore
+
+    @distributed_trace_async
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a container exists and returns False otherwise.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: True if the container exists, False otherwise.
+        """
+        try:
+            await self._client.container.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace_async
+    async def set_container_metadata(  # type: ignore
+            self, metadata=None,  # type: Optional[Dict[str, str]]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        container. Each call to this operation replaces all existing metadata
+        attached to the container. To remove all metadata from the container,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the container as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_container_metadata only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Container-updated property dict (Etag and last modified).
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START set_container_metadata]
+                :end-before: [END set_container_metadata]
+                :language: python
+                :dedent: 16
+                :caption: Setting metadata on the container.
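+
+        As a brief sketch (``container_client`` is an assumed, authenticated
+        ContainerClient; the metadata values are placeholders):
+
+        .. code-block:: python
+
+            # Replaces any metadata already set on the container.
+            await container_client.set_container_metadata(
+                metadata={"category": "test", "owner": "data-team"})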
+ """ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + return await self._client.container.set_metadata( # type: ignore + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs + # type: (...) -> BlobServiceClient + """Get a client to interact with the container's parent service account. + + Defaults to current container's credentials. + + :returns: A BlobServiceClient. + :rtype: ~azure.storage.blob.BlobServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START get_blob_service_client_from_container_client] + :end-before: [END get_blob_service_client_from_container_client] + :language: python + :dedent: 8 + :caption: Get blob service client from container object. + """ + from ._blob_service_client_async import BlobServiceClient + if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + else: + _pipeline = self._pipeline # pylint: disable = protected-access + return BlobServiceClient( + "{}://{}".format(self.scheme, self.primary_hostname), + credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function, + _pipeline=_pipeline) + + + @distributed_trace_async + async def get_container_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the specified container. + The permissions indicate whether container data may be accessed publicly. + + :keyword lease: + If specified, get_container_access_policy only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_container_access_policy] + :end-before: [END get_container_access_policy] + :language: python + :dedent: 16 + :caption: Getting the access policy on the container. 
+ """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = await self._client.container.get_access_policy( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=return_headers_and_deserialized, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return { + 'public_access': response.get('blob_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace_async + async def set_container_access_policy( + self, signed_identifiers, # type: Dict[str, AccessPolicy] + public_access=None, # type: Optional[Union[str, PublicAccess]] + **kwargs # type: Any + ): # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the permissions for the specified container or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether blobs in a container may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the container. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified date/time. + :keyword ~datetime.datetime if_unmodified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Container-updated property dict (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START set_container_access_policy] + :end-before: [END set_container_access_policy] + :language: python + :dedent: 16 + :caption: Setting access policy on the container. + """ + timeout = kwargs.pop('timeout', None) + lease = kwargs.pop('lease', None) + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. 
The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore + signed_identifiers = identifiers # type: ignore + + mod_conditions = get_modify_conditions(kwargs) + access_conditions = get_access_conditions(lease) + try: + return await self._client.container.set_access_policy( + container_acl=signed_identifiers or None, + timeout=timeout, + access=public_access, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_blobs(self, name_starts_with=None, include=None, **kwargs): + # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] or str include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions', + 'tags', 'versions'. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START list_blobs_in_container] + :end-before: [END list_blobs_in_container] + :language: python + :dedent: 12 + :caption: List the blobs in the container. + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_flat_segment, + include=include, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=BlobPropertiesPaged + ) + + @distributed_trace + def walk_blobs( + self, name_starts_with=None, # type: Optional[str] + include=None, # type: Optional[Any] + delimiter="/", # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> AsyncItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. This operation will list blobs in accordance with a hierarchy, + as delimited by the specified delimiter character. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param list[str] include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted'. 
+        :param str delimiter:
+            When the request includes this parameter, the operation returns a BlobPrefix
+            element in the response body that acts as a placeholder for all blobs whose
+            names begin with the same substring up to the appearance of the delimiter
+            character. The delimiter may be a single character or a string.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) response of BlobProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties]
+        """
+        if include and not isinstance(include, list):
+            include = [include]
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.container.list_blob_hierarchy_segment,
+            delimiter=delimiter,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return BlobPrefix(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            delimiter=delimiter)
+
+    @distributed_trace_async
+    async def upload_blob(
+            self, name,  # type: Union[str, BlobProperties]
+            data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
+            blob_type=BlobType.BlockBlob,  # type: Union[str, BlobType]
+            length=None,  # type: Optional[int]
+            metadata=None,  # type: Optional[Dict[str, str]]
+            **kwargs
+        ):
+        # type: (...) -> BlobClient
+        """Creates a new blob from a data source with automatic chunking.
+
+        :param name: The blob with which to interact. If specified, this value will override
+            a blob value specified in the blob URL.
+        :type name: str or ~azure.storage.blob.BlobProperties
+        :param data: The blob data to upload.
+        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError. The exception to the above is with Append
+            blob types: if set to False and the data already exists, an error will not be raised
+            and the data will be appended to the existing blob. If overwrite=True, the existing
+            append blob will be deleted and a new one created. Defaults to False.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used, because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the container has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :returns: A BlobClient to interact with the newly uploaded blob. + :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START upload_blob_to_container]
+                :end-before: [END upload_blob_to_container]
+                :language: python
+                :dedent: 12
+                :caption: Upload blob to the container.
+        """
+        blob = self.get_blob_client(name)
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        await blob.upload_blob(
+            data,
+            blob_type=blob_type,
+            length=length,
+            metadata=metadata,
+            timeout=timeout,
+            encoding=encoding,
+            **kwargs
+        )
+        return blob
+
+    @distributed_trace_async
+    async def delete_blob(
+            self, blob,  # type: Union[str, BlobProperties]
+            delete_snapshots=None,  # type: Optional[str]
+            **kwargs
+    ):
+        # type: (...) -> None
+        """Marks the specified blob or snapshot for deletion.
+
+        The blob is later deleted during garbage collection.
+        Note that in order to delete a blob, you must delete all of its
+        snapshots. You can delete both at the same time with the delete_blob
+        operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blob
+        or snapshot and retains the blob or snapshot for the specified number of days.
+        After the specified number of days, the blob's data is removed from the service during garbage collection.
+        A soft-deleted blob or snapshot is accessible through :func:`list_blobs()` specifying the
+        `include=["deleted"]` option. A soft-deleted blob or snapshot can be restored using
+        :func:`~BlobClient.undelete()`.
+
+        :param blob: The blob with which to interact. If specified, this value will override
+            a blob value specified in the blob URL.
+        :type blob: str or ~azure.storage.blob.BlobProperties
+        :param str delete_snapshots:
+            Required if the blob has associated snapshots. Values include:
+            - "only": Deletes only the blob's snapshots.
+            - "include": Deletes the blob along with all snapshots.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to delete.
+
+            .. versionadded:: 12.4.0
+                This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a Lease object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. 
``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + blob = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + await blob.delete_blob( # type: ignore + delete_snapshots=delete_snapshots, + timeout=timeout, + **kwargs) + + @distributed_trace_async + async def download_blob(self, blob, offset=None, length=None, **kwargs): + # type: (Union[str, BlobProperties], Optional[int], Optional[int], Any) -> StorageStreamDownloader + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. 
versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :keyword str encoding:
+            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds. This method may make
+            multiple calls to the Azure service and the timeout will apply to
+            each call individually.
+        :returns: A streaming object. (StorageStreamDownloader)
+        :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
+        """
+        blob_client = self.get_blob_client(blob)  # type: ignore
+        kwargs.setdefault('merge_span', True)
+        return await blob_client.download_blob(
+            offset=offset,
+            length=length,
+            **kwargs)
+
+    @distributed_trace_async
+    async def delete_blobs(  # pylint: disable=arguments-differ
+            self, *blobs: List[Union[str, BlobProperties, dict]],
+            **kwargs
+    ) -> AsyncIterator[AsyncHttpResponse]:
+        """Marks the specified blobs or snapshots for deletion.
+
+        The blobs are later deleted during garbage collection.
+        Note that in order to delete blobs, you must delete all of their
+        snapshots. You can delete both at the same time with the delete_blobs operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blobs
+        or snapshots and retains the blobs or snapshots for the specified number of days.
+        After the specified number of days, the blobs' data is removed from the service during garbage collection.
+        Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying the
+        `include=["deleted"]` option. Soft-deleted blobs or snapshots can be restored using
+        :func:`~BlobClient.undelete()`.
+
+        :param blobs:
+            The blobs to delete. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When the blob type is dict, here's a list of keys and value rules.
+
+                blob name:
+                    key: 'name', value type: str
+                snapshot you want to delete:
+                    key: 'snapshot', value type: str
+                whether to delete snapshots when deleting the blob:
+                    key: 'delete_snapshots', value: 'include' or 'only'
+                whether the blob has been modified or not:
+                    key: 'if_modified_since', 'if_unmodified_since', value type: datetime
+                etag:
+                    key: 'etag', value type: str
+                match the etag or not:
+                    key: 'match_condition', value type: MatchConditions
+                tags match condition:
+                    key: 'if_tags_match_condition', value type: str
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+        :keyword str delete_snapshots:
+            Required if a blob has associated snapshots. Values include:
+            - "only": Deletes only the blob's snapshots.
+            - "include": Deletes the blob along with all snapshots.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword bool raise_on_any_failure:
+            This is a boolean param which defaults to True. When this is set, an exception
+            is raised even if there is a single operation failure. For optimal performance,
+            this should be set to False.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: An async iterator of responses, one for each blob in order.
+        :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START delete_multiple_blobs]
+                :end-before: [END delete_multiple_blobs]
+                :language: python
+                :dedent: 12
+                :caption: Deleting multiple blobs.
+        """
+        if len(blobs) == 0:
+            return iter(list())
+
+        reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs)
+
+        return await self._batch_send(*reqs, **options)
+
+    @distributed_trace
+    async def set_standard_blob_tier_blobs(
+            self,
+            standard_blob_tier: Union[str, 'StandardBlobTier'],
+            *blobs: List[Union[str, BlobProperties, dict]],
+            **kwargs
+    ) -> AsyncIterator[AsyncHttpResponse]:
+        """This operation sets the tier on block blobs.
+
+        A block blob's tier determines Hot/Cool/Archive storage type.
+        This operation does not update the blob's ETag.
+
+        :param standard_blob_tier:
+            Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool',
+            'Archive'. The hot tier is optimized for storing data that is accessed
+            frequently. The cool storage tier is optimized for storing data that
+            is infrequently accessed and stored for at least a month. The archive
+            tier is optimized for storing data that is rarely accessed and stored
+            for at least six months with flexible latency requirements.
+
+            .. note::
+                If you want to set a different tier on different blobs, please set this
+                positional parameter to None. Then the blob tier on each BlobProperties
+                will be used.
+
+        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+        :param blobs:
+            The blobs with which to interact. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When the blob type is dict, here's a list of keys and value rules.
+                blob name:
+                    key: 'name', value type: str
+                standard blob tier:
+                    key: 'blob_tier', value type: StandardBlobTier
+                rehydrate priority:
+                    key: 'rehydrate_priority', value type: RehydratePriority
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                tags match condition:
+                    key: 'if_tags_match_condition', value type: str
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :keyword bool raise_on_any_failure:
+            This is a boolean param which defaults to True. When this is set, an exception
+            is raised even if there is a single operation failure. For optimal performance,
+            this should be set to False.
+        :return: An async iterator of responses, one for each blob in order.
+        :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse]
+        """
+        reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs)
+
+        return await self._batch_send(*reqs, **options)
+
+    @distributed_trace
+    async def set_premium_page_blob_tier_blobs(
+            self,
+            premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'],
+            *blobs: List[Union[str, BlobProperties, dict]],
+            **kwargs
+    ) -> AsyncIterator[AsyncHttpResponse]:
+        """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts.
+
+        :param premium_page_blob_tier:
+            A page blob tier value to set on all blobs. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+
+            .. note::
+                If you want to set a different tier on different blobs, please set this
+                positional parameter to None. Then the blob tier on each BlobProperties
+                will be used.
+
+        :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+        :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When the blob type is dict, here's a list of keys and value rules.
+
+                blob name:
+                    key: 'name', value type: str
+                premium blob tier:
+                    key: 'blob_tier', value type: PremiumPageBlobTier
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: list[str], list[dict], or list[~azure.storage.blob.BlobProperties]
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds. This method may make
+            multiple calls to the Azure service and the timeout will apply to
+            each call individually.
+        :keyword bool raise_on_any_failure:
+            This is a boolean param which defaults to True. When this is set, an exception
+            is raised even if there is a single operation failure. For optimal performance,
+            this should be set to False.
+ :return: An async iterator of responses, one for each blob in order + :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] + """ + reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) + + return await self._batch_send(*reqs, **options) + + def get_blob_client( + self, blob, # type: Union[BlobProperties, str] + snapshot=None # type: str + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param blob: + The blob with which to interact. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`~BlobClient.create_snapshot()`. + :returns: A BlobClient. + :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_blob_client] + :end-before: [END get_blob_client] + :language: python + :dedent: 12 + :caption: Get the blob client. + """ + blob_name = _get_blob_name(blob) + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( + self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_download_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_download_async.py new file mode 100644 index 0000000..135fd66 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_download_async.py @@ -0,0 +1,547 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import asyncio +import sys +from io import BytesIO +from itertools import islice +import warnings +from typing import AsyncIterator + +from aiohttp import ClientPayloadError +from azure.core.exceptions import HttpResponseError, ServiceResponseError +from .._shared.encryption import decrypt_blob +from .._shared.request_handlers import validate_and_format_range_headers +from .._shared.response_handlers import process_storage_error, parse_length_from_content_range +from .._deserialize import get_page_ranges_result +from .._download import process_range_and_offset, _ChunkDownloader + +async def process_content(data, start_offset, end_offset, encryption): + if data is None: + raise ValueError("Response cannot be None.") + content = data.response.body() + if encryption.get('key') is not None or encryption.get('resolver') is not None: + try: + return decrypt_blob( + encryption.get('required'), + encryption.get('key'), + encryption.get('resolver'), + content, + start_offset, + end_offset, + data.response.headers) + except Exception as error: + raise HttpResponseError( + message="Decryption failed.", + response=data.response, + error=error) + return content + + +class _AsyncChunkDownloader(_ChunkDownloader): + def __init__(self, **kwargs): + super(_AsyncChunkDownloader, self).__init__(**kwargs) + self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None + self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None + + async def process_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) + length = chunk_end - chunk_start + if length > 0: + await self._write_to_stream(chunk_data, chunk_start) + await self._update_progress(length) + + async def yield_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + return await self._download_chunk(chunk_start, chunk_end - 1) + + async def _update_progress(self, length): + if self.progress_lock: + async with self.progress_lock: # pylint: disable=not-async-context-manager + self.progress_total += length + else: + self.progress_total += length + + async def _write_to_stream(self, chunk_data, chunk_start): + if self.stream_lock: + async with self.stream_lock: # pylint: disable=not-async-context-manager + self.stream.seek(self.stream_start + (chunk_start - self.start_index)) + self.stream.write(chunk_data) + else: + self.stream.write(chunk_data) + + async def _download_chunk(self, chunk_start, chunk_end): + download_range, offset = process_range_and_offset( + chunk_start, chunk_end, chunk_end, self.encryption_options) + + # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. + # Do optimize and create empty chunk locally if condition is met. 
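+        # (Assumption from the surrounding code: _do_optimize consults the
+        # non-empty page ranges fetched for sparse page blobs, so a chunk that
+        # is known to contain no data can be synthesized locally as zeros
+        # instead of issuing a GET to the service.)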
+        if self._do_optimize(download_range[0], download_range[1]):
+            chunk_data = b"\x00" * self.chunk_size
+        else:
+            range_header, range_validation = validate_and_format_range_headers(
+                download_range[0],
+                download_range[1],
+                check_content_md5=self.validate_content
+            )
+            retry_active = True
+            retry_total = 3
+            while retry_active:
+                try:
+                    _, response = await self.client.download(
+                        range=range_header,
+                        range_get_content_md5=range_validation,
+                        validate_content=self.validate_content,
+                        data_stream_total=self.total_size,
+                        download_stream_current=self.progress_total,
+                        **self.request_options
+                    )
+                    retry_active = False
+
+                except HttpResponseError as error:
+                    process_storage_error(error)
+                except ClientPayloadError as error:
+                    retry_total -= 1
+                    if retry_total <= 0:
+                        raise ServiceResponseError(error, error=error)
+                    await asyncio.sleep(1)
+
+            chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options)
+
+
+        # This makes sure that if_match is set so that we can validate
+        # that subsequent downloads are to an unmodified blob
+        if self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = response.properties.etag
+
+        return chunk_data
+
+
+class _AsyncChunkIterator(object):
+    """Async iterator for chunks in blob download stream."""
+
+    def __init__(self, size, content, downloader, chunk_size):
+        self.size = size
+        self._chunk_size = chunk_size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks = None
+        self._complete = (size == 0)
+
+    def __len__(self):
+        return self.size
+
+    def __iter__(self):
+        raise TypeError("Async stream must be iterated asynchronously.")
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        """Iterate through responses."""
+        if self._complete:
+            raise StopAsyncIteration("Download complete")
+        if not self._iter_downloader:
+            # cut the data obtained from the initial GET into chunks
+            if len(self._current_content) > self._chunk_size:
+                return self._get_chunk_data()
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+        # initial GET result still has more than _chunk_size bytes of data
+        if len(self._current_content) >= self._chunk_size:
+            return self._get_chunk_data()
+
+        try:
+            chunk = next(self._iter_chunks)
+            self._current_content += await self._iter_downloader.yield_chunk(chunk)
+        except StopIteration:
+            self._complete = True
+            # it's likely that there is some data left in self._current_content
+            if self._current_content:
+                return self._current_content
+            raise StopAsyncIteration("Download complete")
+
+        return self._get_chunk_data()
+
+    def _get_chunk_data(self):
+        chunk_data = self._current_content[: self._chunk_size]
+        self._current_content = self._current_content[self._chunk_size:]
+        return chunk_data
+
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the blob being downloaded.
+    :ivar str container:
+        The name of the container where the blob is.
+    :ivar ~azure.storage.blob.BlobProperties properties:
+        The properties of the blob being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the blob.
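+
+    A minimal sketch of typical use (``blob_client`` is a placeholder for an
+    existing ``aio.BlobClient`` and is not defined by this module):
+
+    .. code-block:: python
+
+        downloader = await blob_client.download_blob()
+        async for chunk in downloader.chunks():
+            ...  # each chunk is a bytes object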
+ """ + + def __init__( + self, + clients=None, + config=None, + start_range=None, + end_range=None, + validate_content=None, + encryption_options=None, + max_concurrency=1, + name=None, + container=None, + encoding=None, + **kwargs + ): + self.name = name + self.container = container + self.properties = None + self.size = None + + self._clients = clients + self._config = config + self._start_range = start_range + self._end_range = end_range + self._max_concurrency = max_concurrency + self._encoding = encoding + self._validate_content = validate_content + self._encryption_options = encryption_options or {} + self._request_options = kwargs + self._location_mode = None + self._download_complete = False + self._current_content = None + self._file_size = None + self._non_empty_ranges = None + self._response = None + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. + self._first_get_size = self._config.max_single_get_size if not self._validate_content \ + else self._config.max_chunk_get_size + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, initial_request_end, self._end_range, self._encryption_options + ) + + def __len__(self): + return self.size + + async def _setup(self): + self._response = await self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.container = self.container + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = 'bytes {0}-{1}/{2}'.format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + if self.size == 0: + self._current_content = b"" + else: + self._current_content = await process_content( + self._response, + self._initial_offset[0], + self._initial_offset[1], + self._encryption_options + ) + + async def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content) + + retry_active = True + retry_total = 3 + while retry_active: + try: + location_mode, response = await self._clients.blob.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. 
+ self._location_mode = location_mode + + # Parse the total file size and adjust the download size if ranges + # were specified + self._file_size = parse_length_from_content_range(response.properties.content_range) + if self._end_range is not None: + # Use the length unless it is over the end of the file + self.size = min(self._file_size, self._end_range - self._start_range + 1) + elif self._start_range is not None: + self.size = self._file_size - self._start_range + else: + self.size = self._file_size + retry_active = False + + except HttpResponseError as error: + if self._start_range is None and error.response.status_code == 416: + # Get range will fail on an empty file. If the user did not + # request a range, do a regular get request in order to get + # any properties. + try: + _, response = await self._clients.blob.download( + validate_content=self._validate_content, + data_stream_total=0, + download_stream_current=0, + **self._request_options) + retry_active = False + except HttpResponseError as error: + process_storage_error(error) + + # Set the download size to empty + self.size = 0 + self._file_size = 0 + else: + process_storage_error(error) + + except ClientPayloadError as error: + retry_total -= 1 + if retry_total <= 0: + raise ServiceResponseError(error, error=error) + await asyncio.sleep(1) + + # get page ranges to optimize downloading sparse page blob + if response.properties.blob_type == 'PageBlob': + try: + page_ranges = await self._clients.page_blob.get_page_ranges() + self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] + except HttpResponseError: + pass + + # If the file is small, the download is complete at this point. + # If file size is large, download the rest of the file in chunks. + if response.properties.size != self.size: + if self._request_options.get('modified_access_conditions'): + self._request_options['modified_access_conditions'].if_match = response.properties.etag + else: + self._download_complete = True + return response + + def chunks(self): + # type: () -> AsyncIterator[bytes] + """Iterate over chunks in the download stream. + + :rtype: AsyncIterator[bytes] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world_async.py + :start-after: [START download_a_blob_in_chunk] + :end-before: [END download_a_blob_in_chunk] + :language: python + :dedent: 16 + :caption: Download a blob using chunks(). + """ + if self.size == 0 or self._download_complete: + iter_downloader = None + else: + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + iter_downloader = _AsyncChunkDownloader( + client=self._clients.blob, + non_empty_ranges=self._non_empty_ranges, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # Start where the first download ended + end_range=data_end, + stream=None, + parallel=False, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + **self._request_options) + return _AsyncChunkIterator( + size=self.size, + content=self._current_content, + downloader=iter_downloader, + chunk_size=self._config.max_chunk_get_size) + + async def readall(self): + """Download the contents of this blob. + + This operation is blocking until all data is downloaded. 
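+
+        A short illustrative sketch (``blob_client`` is a placeholder for an
+        existing ``aio.BlobClient``):
+
+        .. code-block:: python
+
+            downloader = await blob_client.download_blob()
+            data = await downloader.readall()
+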
+        :rtype: bytes or str
+        """
+        stream = BytesIO()
+        await self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)
+        return data
+
+    async def content_as_bytes(self, max_concurrency=1):
+        """Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        return await self.readall()
+
+    async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """Download the contents of this blob, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            Text encoding to decode the downloaded bytes. Default is UTF-8.
+        :rtype: str
+        """
+        warnings.warn(
+            "content_as_text is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        return await self.readall()
+
+    async def readinto(self, stream):
+        """Download the contents of this blob to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        # the stream must be seekable if a parallel download is required
+        parallel = self._max_concurrency > 1
+        if parallel:
+            error_message = "Target stream handle must be seekable."
+            if sys.version_info >= (3,) and not stream.seekable():
+                raise ValueError(error_message)
+
+            try:
+                stream.seek(stream.tell())
+            except (NotImplementedError, AttributeError):
+                raise ValueError(error_message)
+
+        # Write the content to the user stream
+        stream.write(self._current_content)
+        if self._download_complete:
+            return self.size
+
+        data_end = self._file_size
+        if self._end_range is not None:
+            # Use the length unless it is over the end of the file
+            data_end = min(self._file_size, self._end_range + 1)
+
+        downloader = _AsyncChunkDownloader(
+            client=self._clients.blob,
+            non_empty_ranges=self._non_empty_ranges,
+            total_size=self.size,
+            chunk_size=self._config.max_chunk_get_size,
+            current_progress=self._first_get_size,
+            start_range=self._initial_range[1] + 1,  # start where the first download ended
+            end_range=data_end,
+            stream=stream,
+            parallel=parallel,
+            validate_content=self._validate_content,
+            encryption_options=self._encryption_options,
+            use_location=self._location_mode,
+            **self._request_options)
+
+        dl_tasks = downloader.get_chunk_offsets()
+        running_futures = [
+            asyncio.ensure_future(downloader.process_chunk(d))
+            for d in islice(dl_tasks, 0, self._max_concurrency)
+        ]
+        while running_futures:
+            # Wait for some download to finish before adding a new one
+            done, running_futures = await asyncio.wait(
+                running_futures, return_when=asyncio.FIRST_COMPLETED)
+            try:
+                for task in done:
+                    task.result()
+            except HttpResponseError as error:
+                process_storage_error(error)
+            try:
+                next_chunk = next(dl_tasks)
+            except StopIteration:
+                break
+            else:
+                running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
+
+        if running_futures:
+            # Wait for the remaining downloads to finish
+            done, _running_futures = await asyncio.wait(running_futures)
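+            # Propagate any failure from the final batch of chunk downloads.
+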
try: + for task in done: + task.result() + except HttpResponseError as error: + process_storage_error(error) + return self.size + + async def download_to_stream(self, stream, max_concurrency=1): + """Download the contents of this blob to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :param int max_concurrency: + The number of parallel connections with which to download. + :returns: The properties of the downloaded blob. + :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + await self.readinto(stream) + return self.properties diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_lease_async.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_lease_async.py new file mode 100644 index 0000000..79e6733 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_lease_async.py @@ -0,0 +1,325 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, + TypeVar, TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator_async import distributed_trace_async + +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._serialize import get_modify_conditions +from .._lease import BlobLeaseClient as LeaseClientBase + +if TYPE_CHECKING: + from datetime import datetime + from .._generated.operations import BlobOperations, ContainerOperations + BlobClient = TypeVar("BlobClient") + ContainerClient = TypeVar("ContainerClient") + + +class BlobLeaseClient(LeaseClientBase): + """Creates a new BlobLeaseClient. + + This client provides lease operations on a BlobClient or ContainerClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the blob or container to lease. + :type client: ~azure.storage.blob.aio.BlobClient or + ~azure.storage.blob.aio.ContainerClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. + """ + + def __enter__(self): + raise TypeError("Async lease must use 'async with'.") + + def __exit__(self, *args): + self.release() + + async def __aenter__(self): + return self + + async def __aexit__(self, *args): + await self.release() + + @distributed_trace_async + async def acquire(self, lease_duration=-1, **kwargs): + # type: (int, Any) -> None + """Requests a new lease. 
+ + If the container does not have an active lease, the Blob service creates a + lease on the container and returns a new lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace_async + async def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the container or blob. Note that + the lease may be renewed even if it has expired as long as the container + or blob has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. 
+ :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the container or blob. Releasing the lease allows another client + to immediately acquire the lease for the container or blob as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.release_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. 
The Blob service returns 400
+            (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response = await self._client.change_lease(
+                lease_id=self.id,
+                proposed_lease_id=proposed_lease_id,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')  # type: str
+        self.id = response.get('lease_id')  # type: str
+        self.last_modified = response.get('last_modified')  # type: datetime
+
+    @distributed_trace_async
+    async def break_lease(self, lease_break_period=None, **kwargs):
+        # type: (Optional[int], Any) -> int
+        """Break the lease if the container or blob has an active lease.
+
+        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. When a lease
+        is broken, the lease break period is allowed to elapse, during which time
+        no lease operation except break and release can be performed on the container or blob.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :param int lease_break_period:
+            This is the proposed duration, in seconds, that the lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the lease. If longer, the time remaining on the lease is used.
+            A new lease will not be available before the break period has
+            expired, but the lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration lease breaks after the remaining lease
+            period elapses, and an infinite lease breaks immediately.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. + :rtype: int + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.break_lease( + timeout=kwargs.pop('timeout', None), + break_period=lease_break_period, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_list_blobs_helper.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_list_blobs_helper.py new file mode 100644 index 0000000..058572f --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_list_blobs_helper.py @@ -0,0 +1,163 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged +from azure.core.exceptions import HttpResponseError +from .._deserialize import get_blob_properties_from_generated_code +from .._models import BlobProperties +from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix +from .._shared.models import DictMixin +from .._shared.response_handlers import return_context_and_deserialized, process_storage_error + + +class BlobPropertiesPaged(AsyncPageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The container that the blobs are listed from. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
+ :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. + """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + delimiter=None, + location_mode=None): + super(BlobPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.delimiter = delimiter + self.current_page = None + self.location_mode = location_mode + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.container = self._response.container_name + self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] + + return self._response.next_marker or None, self.current_page + + def _build_item(self, item): + if isinstance(item, BlobProperties): + return item + if isinstance(item, BlobItemInternal): + blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access + blob.container = self.container + return blob + return item + + +class BlobPrefix(AsyncItemPaged, DictMixin): + """An Iterable of Blob properties. + + Returned from walk_blobs when a delimiter is used. + Can be thought of as a virtual blob directory. + + :ivar str name: The prefix, or "directory name" of the blob. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str marker: The continuation token of the current page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str marker: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. 
+ :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. + """ + def __init__(self, *args, **kwargs): + super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) + self.name = kwargs.get('prefix') + self.prefix = kwargs.get('prefix') + self.results_per_page = kwargs.get('results_per_page') + self.container = kwargs.get('container') + self.delimiter = kwargs.get('delimiter') + self.location_mode = kwargs.get('location_mode') + + +class BlobPrefixPaged(BlobPropertiesPaged): + def __init__(self, *args, **kwargs): + super(BlobPrefixPaged, self).__init__(*args, **kwargs) + self.name = self.prefix + + async def _extract_data_cb(self, get_next_return): + continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) + self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items + self.current_page = [self._build_item(item) for item in self.current_page] + self.delimiter = self._response.delimiter + + return continuation_token, self.current_page + + def _build_item(self, item): + item = super(BlobPrefixPaged, self)._build_item(item) + if isinstance(item, GenBlobPrefix): + return BlobPrefix( + self._command, + container=self.container, + prefix=item.name, + results_per_page=self.results_per_page, + location_mode=self.location_mode) + return item diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_models.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_models.py new file mode 100644 index 0000000..05edd78 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_models.py @@ -0,0 +1,143 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines + +from azure.core.async_paging import AsyncPageIterator +from azure.core.exceptions import HttpResponseError +from .._deserialize import parse_tags + +from .._models import ContainerProperties, FilteredBlob +from .._shared.response_handlers import return_context_and_deserialized, process_storage_error + +from .._generated.models import FilterBlobItem + + +class ContainerPropertiesPaged(AsyncPageIterator): + """An Iterable of Container properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A container name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.ContainerProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only containers whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of container names to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
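+
+    Pagers of this kind are not constructed directly; they come back from the
+    service client's listing call and are consumed with ``async for``. A sketch
+    (``service_client`` is a placeholder for an existing ``aio.BlobServiceClient``):
+
+    .. code-block:: python
+
+        async for container in service_client.list_containers():
+            print(container.name)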
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(ContainerPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [self._build_item(item) for item in self._response.container_items] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + return ContainerProperties._from_generated(item) # pylint: disable=protected-access + + +class FilteredBlobPaged(AsyncPageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
+ """ + def __init__( + self, command, + container=None, + results_per_page=None, + continuation_token=None, + location_mode=None): + super(FilteredBlobPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.marker = continuation_token + self.results_per_page = results_per_page + self.container = container + self.current_page = None + self.location_mode = location_mode + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.marker = self._response.next_marker + self.current_page = [self._build_item(item) for item in self._response.blobs] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + if isinstance(item, FilterBlobItem): + tags = parse_tags(item.tags) + blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) + return blob + return item diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/aio/_upload_helpers.py b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_upload_helpers.py new file mode 100644 index 0000000..985e731 --- /dev/null +++ b/azure/multiapi/storagev2/blob/v2020_10_02/aio/_upload_helpers.py @@ -0,0 +1,281 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from io import SEEK_SET, UnsupportedOperation +from typing import Optional, Union, Any, TypeVar, TYPE_CHECKING # pylint: disable=unused-import + +import six +from azure.core.exceptions import ResourceModifiedError, HttpResponseError + +from .._shared.response_handlers import ( + process_storage_error, + return_response_headers) +from .._shared.uploads_async import ( + upload_data_chunks, + upload_substream_blocks, + BlockBlobChunkUploader, + PageBlobChunkUploader, + AppendBlobChunkUploader) +from .._shared.encryption import generate_blob_encryption_data, encrypt_blob +from .._generated.models import ( + BlockLookupList, + AppendPositionAccessConditions, + ModifiedAccessConditions, +) +from .._upload_helpers import _convert_mod_error, _any_conditions + +if TYPE_CHECKING: + from datetime import datetime # pylint: disable=unused-import + BlobLeaseClient = TypeVar("BlobLeaseClient") + + +async def upload_block_blob( # pylint: disable=too-many-locals + client=None, + data=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + adjusted_count = length + if (encryption_options.get('key') is not None) and (adjusted_count is not None): + adjusted_count += (16 - (length % 16)) + blob_headers = kwargs.pop('blob_headers', None) + tier = kwargs.pop('standard_blob_tier', None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + immutability_policy = kwargs.pop('immutability_policy', None) + immutability_policy_expiry = None if immutability_policy is None else immutability_policy.expiry_time + immutability_policy_mode = None if immutability_policy is None else immutability_policy.policy_mode + legal_hold = kwargs.pop('legal_hold', None) + + # Do single put if the size is smaller than config.max_single_put_size + if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): + try: + data = data.read(length) + if not isinstance(data, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + except AttributeError: + pass + if encryption_options.get('key'): + encryption_data, data = encrypt_blob(data, encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + return await client.upload( + body=data, + content_length=adjusted_count, + blob_http_headers=blob_headers, + headers=headers, + cls=return_response_headers, + validate_content=validate_content, + data_stream_total=adjusted_count, + upload_stream_current=0, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + **kwargs) + + use_original_upload_path = blob_settings.use_byte_buffer or \ + validate_content or encryption_options.get('required') or \ + blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ + hasattr(stream, 'seekable') and not stream.seekable() or \ + not hasattr(stream, 'seek') or not hasattr(stream, 'tell') + + if use_original_upload_path: + if encryption_options.get('key'): + cek, iv, encryption_data = generate_blob_encryption_data(encryption_options['key']) + headers['x-ms-meta-encryptiondata'] = encryption_data + 
encryption_options['cek'] = cek + encryption_options['vector'] = iv + block_ids = await upload_data_chunks( + service=client, + uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + encryption_options=encryption_options, + headers=headers, + **kwargs + ) + else: + block_ids = await upload_substream_blocks( + service=client, + uploader_class=BlockBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + headers=headers, + **kwargs + ) + + block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) + block_lookup.latest = block_ids + return await client.commit_block_list( + block_lookup, + blob_http_headers=blob_headers, + cls=return_response_headers, + validate_content=validate_content, + headers=headers, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + **kwargs) + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +async def upload_page_blob( + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + if length is None or length < 0: + raise ValueError("A content length must be specified for a Page Blob.") + if length % 512 != 0: + raise ValueError("Invalid page blob size: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(length)) + if kwargs.get('premium_page_blob_tier'): + premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') + try: + headers['x-ms-access-tier'] = premium_page_blob_tier.value + except AttributeError: + headers['x-ms-access-tier'] = premium_page_blob_tier + if encryption_options and encryption_options.get('data'): + headers['x-ms-meta-encryptiondata'] = encryption_options['data'] + blob_tags_string = kwargs.pop('blob_tags_string', None) + + response = await client.create( + content_length=0, + blob_content_length=length, + blob_sequence_number=None, + blob_http_headers=kwargs.pop('blob_headers', None), + blob_tags_string=blob_tags_string, + cls=return_response_headers, + headers=headers, + **kwargs) + if length == 0: + return response + + kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) + return await upload_data_chunks( + service=client, + uploader_class=PageBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_page_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + encryption_options=encryption_options, + headers=headers, + **kwargs) + + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +async def upload_append_blob( # pylint: disable=unused-argument + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if length == 0: + return {} + blob_headers = kwargs.pop('blob_headers', None) + append_conditions = AppendPositionAccessConditions( + max_size=kwargs.pop('maxsize_condition', None), + append_position=None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + try: + if overwrite: + await client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return await upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + headers=headers, + **kwargs) + except HttpResponseError as error: + if error.response.status_code != 404: + raise + # rewind the request body if it is a stream + if hasattr(stream, 'read'): + try: + # attempt to rewind the body to the initial position + stream.seek(0, SEEK_SET) + except UnsupportedOperation: + # if body is not seekable, then retry would not work + raise error + await client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return await upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/blob/v2020_10_02/py.typed b/azure/multiapi/storagev2/blob/v2020_10_02/py.typed new file mode 100644 index 0000000..e69de29 diff --git 
a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_directory_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_directory_client.py
index 22eefba..c42391e 100644
--- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_directory_client.py
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_directory_client.py
@@ -200,7 +200,7 @@ def delete_directory(self, **kwargs):
             :dedent: 4
             :caption: Delete directory.
         """
-        return self._delete(**kwargs)
+        return self._delete(recursive=True, **kwargs)

     def get_directory_properties(self, **kwargs):
         # type: (**Any) -> DirectoryProperties
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_file_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_file_client.py
index 0a452c1..e15842d 100644
--- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_file_client.py
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_data_lake_file_client.py
@@ -318,6 +318,7 @@ def _upload_options(  # pylint:disable=too-many-statements
         kwargs['validate_content'] = validate_content
         kwargs['max_concurrency'] = max_concurrency
         kwargs['client'] = self._client.path
+        kwargs['file_settings'] = self._config
         return kwargs
@@ -365,6 +366,15 @@ def upload_data(self, data,  # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
             If a date is passed in without timezone info, it is assumed to be UTC.
             Specify this header to perform the operation only if
             the resource has not been modified since the specified date/time.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
         :keyword str etag:
             An ETag value, or the wildcard character (*). Used to check if the resource has changed,
             and act according to the condition specified by the `match_condition` parameter.
@@ -547,7 +557,7 @@ def download_file(self, offset=None, length=None, **kwargs):
         # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
         """Downloads a file to the StorageStreamDownloader. The readall() method must
         be used to read all the content, or readinto() must be used to download the file into
-        a stream.
+        a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks.

         :param int offset:
             Start of byte range to use for downloading a section of the file.
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_download.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_download.py
index e4efd8c..61716d3 100644
--- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_download.py
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_download.py
@@ -3,6 +3,8 @@
 # Licensed under the MIT License. See License.txt in the project root for
 # license information.
 # --------------------------------------------------------------------------
+from typing import Iterator
+
 from ._deserialize import from_blob_properties
@@ -29,6 +31,11 @@ def __len__(self):
         return self.size

     def chunks(self):
+        # type: () -> Iterator[bytes]
+        """Iterate over chunks in the download stream.
+
+        :rtype: Iterator[bytes]
+        """
         return self._downloader.chunks()

     def readall(self):
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_file_system_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_file_system_client.py
index 2aec1e6..c0060a5 100644
--- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_file_system_client.py
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_file_system_client.py
@@ -275,6 +275,7 @@ def _rename_file_system(self, new_name, **kwargs):
         :rtype: ~azure.storage.filedatalake.FileSystemClient
         """
         self._container_client._rename_container(new_name, **kwargs)   # pylint: disable=protected-access
+        # TODO: self._raw_credential would not work with SAS tokens
         renamed_file_system = FileSystemClient(
             "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name,
             credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_path_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_path_client.py
index 7454d43..0518141 100644
--- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_path_client.py
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_path_client.py
@@ -210,7 +210,6 @@ def _delete_path_options(**kwargs):
         mod_conditions = get_mod_conditions(kwargs)

         options = {
-            'recursive': True,
             'lease_access_conditions': access_conditions,
             'modified_access_conditions': mod_conditions,
             'timeout': kwargs.pop('timeout', None)}
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client.py
index 7b0258f..5e524b2 100644
--- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client.py
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/base_client.py
@@ -3,19 +3,13 @@
 # Licensed under the MIT License. See License.txt in the project root for
 # license information.
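A short hedged sketch of the chunks() iterator documented above, on the sync datalake download path (connection string and paths are placeholders):

from azure.multiapi.storagev2.filedatalake.v2020_02_10 import DataLakeFileClient

file_client = DataLakeFileClient.from_connection_string(
    "<connection-string>", file_system_name="myfs", file_path="dir/data.bin")

downloader = file_client.download_file()
with open("data.bin", "wb") as handle:
    # chunks() yields the content piecewise instead of buffering it all in memory.
    for chunk in downloader.chunks():
        handle.write(chunk)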
# -------------------------------------------------------------------------- - +import logging +import uuid from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - Iterable, - Dict, - List, - Type, Tuple, - TYPE_CHECKING, ) -import logging try: from urllib.parse import parse_qs, quote @@ -45,6 +39,7 @@ from .models import LocationMode from .authentication import SharedKeyCredentialPolicy from .shared_access_signature import QueryStringConstants +from .request_handlers import serialize_batch_body, _get_batch_request_delimiter from .policies import ( StorageHeadersPolicy, StorageContentValidation, @@ -61,13 +56,12 @@ _LOGGER = logging.getLogger(__name__) _SERVICE_PARAMS = { - "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, - "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, - "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, - "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, + "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, + "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, + "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, + "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, } - class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes def __init__( self, @@ -262,33 +256,52 @@ def _create_pipeline(self, credential, **kwargs): return config, Pipeline(config.transport, policies=policies) def _batch_send( - self, *reqs, # type: HttpRequest + self, + *reqs, # type: HttpRequest **kwargs ): """Given a series of request, do a Storage batch call. """ # Pop it here, so requests doesn't feel bad about additional kwarg raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + batch_id = str(uuid.uuid1()) + request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), + url='{}://{}/{}?{}comp=batch{}{}'.format( + self.scheme, + self.primary_hostname, + kwargs.pop('path', ""), + kwargs.pop('restype', ""), + kwargs.pop('sas', ""), + kwargs.pop('timeout', "") + ), headers={ - 'x-ms-version': self.api_version + 'x-ms-version': self.api_version, + "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) } ) + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + request.set_multipart_mixed( *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], + policies=policies, enforce_https=False ) + Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access + body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) + request.set_bytes_body(body) + + temp = request.multipart_mixed_info + request.multipart_mixed_info = None pipeline_response = self._pipeline.run( request, **kwargs ) response = pipeline_response.http_response + request.multipart_mixed_info = temp try: if response.status_code not in [202]: @@ -350,15 +363,15 @@ def parse_connection_str(conn_str, credential, service): conn_settings = [s.split("=", 1) for s in conn_str.split(";")] if any(len(tup) != 2 for tup in conn_settings): raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict(conn_settings) + conn_settings = dict((key.upper(), val) for key, val in conn_settings) endpoints = _SERVICE_PARAMS[service] primary = None secondary = None if not 
credential:
         try:
-            credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]}
+            credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
         except KeyError:
-            credential = conn_settings.get("SharedAccessSignature")
+            credential = conn_settings.get("SHAREDACCESSSIGNATURE")
     if endpoints["primary"] in conn_settings:
         primary = conn_settings[endpoints["primary"]]
         if endpoints["secondary"] in conn_settings:
@@ -368,13 +381,13 @@ def parse_connection_str(conn_str, credential, service):
             raise ValueError("Connection string specifies only secondary endpoint.")
         try:
             primary = "{}://{}.{}.{}".format(
-                conn_settings["DefaultEndpointsProtocol"],
-                conn_settings["AccountName"],
+                conn_settings["DEFAULTENDPOINTSPROTOCOL"],
+                conn_settings["ACCOUNTNAME"],
                 service,
-                conn_settings["EndpointSuffix"],
+                conn_settings["ENDPOINTSUFFIX"],
             )
             secondary = "{}-secondary.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings["EndpointSuffix"]
+                conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"]
             )
         except KeyError:
             pass
@@ -382,7 +395,7 @@ def parse_connection_str(conn_str, credential, service):
     if not primary:
         try:
             primary = "https://{}.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE)
+                conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE)
             )
         except KeyError:
             raise ValueError("Connection string missing required connection details.")
@@ -411,6 +424,9 @@ def create_configuration(**kwargs):
     # Page blob uploads
     config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024)

+    # Datalake file uploads
+    config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1)
+
     # Blob downloads
     config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024)
     config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/request_handlers.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/request_handlers.py
index 4f15b65..37354d7 100644
--- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/request_handlers.py
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/request_handlers.py
@@ -20,6 +20,10 @@

 _LOGGER = logging.getLogger(__name__)

+_REQUEST_DELIMITER_PREFIX = "batch_"
+_HTTP1_1_IDENTIFIER = "HTTP/1.1"
+_HTTP_LINE_ENDING = "\r\n"
+

 def serialize_iso(attr):
     """Serialize Datetime object into ISO-8601 formatted string.
@@ -145,3 +149,125 @@ def add_metadata_headers(metadata=None):
         for key, value in metadata.items():
             headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value
     return headers
+
+
+def serialize_batch_body(requests, batch_id):
+    """
+    --<delimiter>
+    <subrequest>
+    --<delimiter>
+    (repeated as needed)
+    --<delimiter>--
+
+    Serializes the requests in this batch to a single HTTP mixed/multipart body.
+
+    :param list[~azure.core.pipeline.transport.HttpRequest] requests:
+        a list of sub-requests for the batch request
+    :param str batch_id:
+        to be embedded in batch sub-request delimiter
+    :return: The body bytes for this batch.
+ """ + + if requests is None or len(requests) == 0: + raise ValueError('Please provide sub-request(s) for this batch request') + + delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') + newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') + batch_body = list() + + content_index = 0 + for request in requests: + request.headers.update({ + "Content-ID": str(content_index), + "Content-Length": str(0) + }) + batch_body.append(delimiter_bytes) + batch_body.append(_make_body_from_sub_request(request)) + batch_body.append(newline_bytes) + content_index += 1 + + batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) + # final line of body MUST have \r\n at the end, or it will not be properly read by the service + batch_body.append(newline_bytes) + + return bytes().join(batch_body) + + +def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): + """ + Gets the delimiter used for this batch request's mixed/multipart HTTP format. + + :param str batch_id: + Randomly generated id + :param bool is_prepend_dashes: + Whether to include the starting dashes. Used in the body, but non on defining the delimiter. + :param bool is_append_dashes: + Whether to include the ending dashes. Used in the body on the closing delimiter only. + :return: The delimiter, WITHOUT a trailing newline. + """ + + prepend_dashes = '--' if is_prepend_dashes else '' + append_dashes = '--' if is_append_dashes else '' + + return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes + + +def _make_body_from_sub_request(sub_request): + """ + Content-Type: application/http + Content-ID: + Content-Transfer-Encoding: (if present) + + HTTP/ +
:
(repeated as necessary) + Content-Length: + (newline if content length > 0) + (if content length > 0) + + Serializes an http request. + + :param ~azure.core.pipeline.transport.HttpRequest sub_request: + Request to serialize. + :return: The serialized sub-request in bytes + """ + + # put the sub-request's headers into a list for efficient str concatenation + sub_request_body = list() + + # get headers for ease of manipulation; remove headers as they are used + headers = sub_request.headers + + # append opening headers + sub_request_body.append("Content-Type: application/http") + sub_request_body.append(_HTTP_LINE_ENDING) + + sub_request_body.append("Content-ID: ") + sub_request_body.append(headers.pop("Content-ID", "")) + sub_request_body.append(_HTTP_LINE_ENDING) + + sub_request_body.append("Content-Transfer-Encoding: binary") + sub_request_body.append(_HTTP_LINE_ENDING) + + # append blank line + sub_request_body.append(_HTTP_LINE_ENDING) + + # append HTTP verb and path and query and HTTP version + sub_request_body.append(sub_request.method) + sub_request_body.append(' ') + sub_request_body.append(sub_request.url) + sub_request_body.append(' ') + sub_request_body.append(_HTTP1_1_IDENTIFIER) + sub_request_body.append(_HTTP_LINE_ENDING) + + # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) + for header_name, header_value in headers.items(): + if header_value is not None: + sub_request_body.append(header_name) + sub_request_body.append(": ") + sub_request_body.append(header_value) + sub_request_body.append(_HTTP_LINE_ENDING) + + # append blank line + sub_request_body.append(_HTTP_LINE_ENDING) + + return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads.py index 29949d5..1b619df 100644 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads.py +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads.py @@ -77,13 +77,13 @@ def upload_data_chunks( validate_content=validate_content, **kwargs) if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + executor.submit(with_current_context(uploader.process_chunk), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) else: range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] if any(range_ids): @@ -112,16 +112,18 @@ def upload_substream_blocks( **kwargs) if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + 
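To make the delimiter scheme concrete, a small sketch exercising the helper defined above (it is private, so the import path mirrors this package and may change):

from azure.multiapi.storagev2.filedatalake.v2020_02_10._shared.request_handlers import (
    _get_batch_request_delimiter,
)

batch_id = "11111111-2222-3333-4444-555555555555"  # stands in for uuid.uuid1()

print(_get_batch_request_delimiter(batch_id))              # Content-Type boundary value
print(_get_batch_request_delimiter(batch_id, True))        # separator within the body
print(_get_batch_request_delimiter(batch_id, True, True))  # closing delimiter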
executor.submit(with_current_context(uploader.process_substream_block), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) else: range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - return sorted(range_ids) + if any(range_ids): + return sorted(range_ids) + return [] class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes @@ -221,16 +223,16 @@ def get_substream_blocks(self): for i in range(blocks): index = i * self.chunk_size length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + yield index, SubStream(self.stream, index, length, lock) def process_substream_block(self, block_data): return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - def _upload_substream_block(self, block_id, block_stream): + def _upload_substream_block(self, index, block_stream): raise NotImplementedError("Must be implemented by child class.") - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) + def _upload_substream_block_with_progress(self, index, block_stream): + range_id = self._upload_substream_block(index, block_stream) self._update_progress(len(block_stream)) return range_id @@ -260,8 +262,9 @@ def _upload_chunk(self, chunk_offset, chunk_data): ) return index, block_id - def _upload_substream_block(self, block_id, block_stream): + def _upload_substream_block(self, index, block_stream): try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) self.service.stage_block( block_id, len(block_stream), @@ -289,7 +292,7 @@ def _upload_chunk(self, chunk_offset, chunk_data): content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) computed_md5 = None self.response_headers = self.service.upload_pages( - chunk_data, + body=chunk_data, content_length=len(chunk_data), transactional_content_md5=computed_md5, range=content_range, @@ -302,6 +305,9 @@ def _upload_chunk(self, chunk_offset, chunk_data): if not self.parallel and self.request_options.get('modified_access_conditions'): self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + def _upload_substream_block(self, index, block_stream): + pass + class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method @@ -312,7 +318,7 @@ def __init__(self, *args, **kwargs): def _upload_chunk(self, chunk_offset, chunk_data): if self.current_length is None: self.response_headers = self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, @@ -324,7 +330,7 @@ def _upload_chunk(self, chunk_offset, chunk_data): self.request_options['append_position_access_conditions'].append_position = \ self.current_length + chunk_offset self.response_headers = self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, @@ -332,6 +338,9 @@ def _upload_chunk(self, chunk_offset, chunk_data): **self.request_options ) + def _upload_substream_block(self, index, block_stream): + pass + class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method @@ -350,6 +359,20 @@ def _upload_chunk(self, chunk_offset, chunk_data): if not self.parallel and 
self.request_options.get('modified_access_conditions'):
             self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']

+    def _upload_substream_block(self, index, block_stream):
+        try:
+            self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+

 class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
@@ -366,6 +389,10 @@ def _upload_chunk(self, chunk_offset, chunk_data):
         )
         return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response

+    # TODO: Implement this method.
+    def _upload_substream_block(self, index, block_stream):
+        pass
+

 class SubStream(IOBase):
@@ -450,6 +477,13 @@ def read(self, size=None):
                 raise IOError("Stream failed to seek to the desired location.")
             buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
         else:
+            absolute_position = self._stream_begin_index + self._position
+            # It's possible that there was a connection problem during data transfer,
+            # so when we retry we don't want to read from the current position of the
+            # wrapped stream; instead, we should seek to the position we want to read from.
+            if self._wrapped_stream.tell() != absolute_position:
+                self._wrapped_stream.seek(absolute_position, SEEK_SET)
+
             buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)

         if buffer_from_stream:
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads_async.py
index 29c0ee4..5ed192b 100644
--- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads_async.py
+++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_shared/uploads_async.py
@@ -124,7 +124,9 @@ async def upload_substream_blocks(
         range_ids = []
         for block in uploader.get_substream_blocks():
             range_ids.append(await uploader.process_substream_block(block))
-    return sorted(range_ids)
+    if any(range_ids):
+        return sorted(range_ids)
+    return []


 class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
@@ -224,16 +226,16 @@ def get_substream_blocks(self):
         for i in range(blocks):
             index = i * self.chunk_size
             length = last_block_size if i == blocks - 1 else self.chunk_size
-            yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock))
+            yield index, SubStream(self.stream, index, length, lock)

     async def process_substream_block(self, block_data):
         return await self._upload_substream_block_with_progress(block_data[0], block_data[1])

-    async def _upload_substream_block(self, block_id, block_stream):
+    async def _upload_substream_block(self, index, block_stream):
         raise NotImplementedError("Must be implemented by child class.")

-    async def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = await self._upload_substream_block(block_id, block_stream)
+    async def _upload_substream_block_with_progress(self, index, block_stream):
+        range_id = await self._upload_substream_block(index, block_stream)
         await self._update_progress(len(block_stream))
         return range_id
@@ -256,14 +258,15 @@ async def _upload_chunk(self, chunk_offset, chunk_data):
         await self.service.stage_block(
             block_id,
             len(chunk_data),
-            chunk_data,
+            body=chunk_data,
             data_stream_total=self.total_size,
             upload_stream_current=self.progress_total,
             **self.request_options)
         return index, block_id

-    async def
_upload_substream_block(self, block_id, block_stream): + async def _upload_substream_block(self, index, block_stream): try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) await self.service.stage_block( block_id, len(block_stream), @@ -293,7 +296,7 @@ async def _upload_chunk(self, chunk_offset, chunk_data): content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) computed_md5 = None self.response_headers = await self.service.upload_pages( - chunk_data, + body=chunk_data, content_length=len(chunk_data), transactional_content_md5=computed_md5, range=content_range, @@ -305,6 +308,9 @@ async def _upload_chunk(self, chunk_offset, chunk_data): if not self.parallel and self.request_options.get('modified_access_conditions'): self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + async def _upload_substream_block(self, index, block_stream): + pass + class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method @@ -315,7 +321,7 @@ def __init__(self, *args, **kwargs): async def _upload_chunk(self, chunk_offset, chunk_data): if self.current_length is None: self.response_headers = await self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, @@ -326,13 +332,16 @@ async def _upload_chunk(self, chunk_offset, chunk_data): self.request_options['append_position_access_conditions'].append_position = \ self.current_length + chunk_offset self.response_headers = await self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, upload_stream_current=self.progress_total, **self.request_options) + async def _upload_substream_block(self, index, block_stream): + pass + class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method @@ -350,18 +359,37 @@ async def _upload_chunk(self, chunk_offset, chunk_data): if not self.parallel and self.request_options.get('modified_access_conditions'): self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + async def _upload_substream_block(self, index, block_stream): + try: + await self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method async def _upload_chunk(self, chunk_offset, chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 response = await self.service.upload_range( chunk_data, chunk_offset, - chunk_end, + length, data_stream_total=self.total_size, upload_stream_current=self.progress_total, **self.request_options ) range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) return range_id, response + + # TODO: Implement this method. 
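Since substream uploaders now receive the block's byte offset instead of a precomputed id, the derivation used above is worth illustrating; this standalone sketch mirrors the format string from the diff:

chunk_size = 4 * 1024 * 1024  # matches the default max_block_size

# Each block id is the block's offset divided by the chunk size, zero-padded.
for index in (0, chunk_size, 2 * chunk_size):
    print('BlockId{}'.format("%05d" % (index / chunk_size)))
# Prints BlockId00000, BlockId00001, BlockId00002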
+ async def _upload_substream_block(self, index, block_stream): + pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_upload_helper.py index 77db8bf..6d88c32 100644 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_upload_helper.py +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_upload_helper.py @@ -10,7 +10,7 @@ from ._shared.response_handlers import return_response_headers from ._shared.uploads import ( upload_data_chunks, - DataLakeFileChunkUploader) + DataLakeFileChunkUploader, upload_substream_blocks) from azure.core.exceptions import HttpResponseError @@ -30,6 +30,7 @@ def upload_datalake_file( # pylint: disable=unused-argument overwrite=None, validate_content=None, max_concurrency=None, + file_settings=None, **kwargs): try: if length == 0: @@ -66,15 +67,32 @@ def upload_datalake_file( # pylint: disable=unused-argument modified_access_conditions.if_modified_since = None modified_access_conditions.if_unmodified_since = None - upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) + use_original_upload_path = file_settings.use_byte_buffer or \ + validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \ + hasattr(stream, 'seekable') and not stream.seekable() or \ + not hasattr(stream, 'seek') or not hasattr(stream, 'tell') + + if use_original_upload_path: + upload_data_chunks( + service=client, + uploader_class=DataLakeFileChunkUploader, + total_size=length, + chunk_size=chunk_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + **kwargs) + else: + upload_substream_blocks( + service=client, + uploader_class=DataLakeFileChunkUploader, + total_size=length, + chunk_size=chunk_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + **kwargs + ) return client.flush_data(position=length, path_http_headers=path_http_headers, diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_version.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_version.py index caae3f7..bf23efa 100644 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/_version.py +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/_version.py @@ -4,4 +4,4 @@ # license information. # -------------------------------------------------------------------------- -VERSION = "12.3.0" +VERSION = "12.3.1" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_directory_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_directory_client_async.py index 6fbe94a..7d0adef 100644 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_directory_client_async.py +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_directory_client_async.py @@ -180,7 +180,7 @@ async def delete_directory(self, **kwargs): :dedent: 4 :caption: Delete directory. 
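A hedged sketch of what the upload-helper branch above means for callers: uploads from a seekable stream now take the memory-efficient substream path unless validate_content (or a byte-buffer configuration) forces the original chunked path. Names and the connection string are placeholders.

from azure.multiapi.storagev2.filedatalake.v2020_02_10 import DataLakeFileClient

file_client = DataLakeFileClient.from_connection_string(
    "<connection-string>", file_system_name="myfs", file_path="dir/big.bin")

with open("big.bin", "rb") as source:
    # Seekable stream + no validate_content -> upload_substream_blocks path.
    file_client.upload_data(source, overwrite=True, max_concurrency=4)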
""" - return await self._delete(**kwargs) + return await self._delete(recursive=True, **kwargs) async def get_directory_properties(self, **kwargs): # type: (**Any) -> DirectoryProperties diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_file_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_file_client_async.py index 34532c9..df25ecf 100644 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_file_client_async.py +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_data_lake_file_client_async.py @@ -290,6 +290,15 @@ async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[An If a date is passed in without timezone info, it is assumed to be UTC. Specify this header to perform the operation only if the resource has not been modified since the specified date/time. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. :keyword str etag: An ETag value, or the wildcard character (*). Used to check if the resource has changed, and act according to the condition specified by the `match_condition` parameter. @@ -422,7 +431,7 @@ async def download_file(self, offset=None, length=None, **kwargs): # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader """Downloads a file to the StorageStreamDownloader. The readall() method must be used to read all the content, or readinto() must be used to download the file into - a stream. + a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. :param int offset: Start of byte range to use for downloading a section of the file. diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_download_async.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_download_async.py index ea27438..5685478 100644 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_download_async.py +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_download_async.py @@ -3,6 +3,8 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- +from typing import AsyncIterator + from .._deserialize import from_blob_properties @@ -29,6 +31,11 @@ def __len__(self): return self.size def chunks(self): + # type: () -> AsyncIterator[bytes] + """Iterate over chunks in the download stream. 
+ + :rtype: AsyncIterator[bytes] + """ return self._downloader.chunks() async def readall(self): diff --git a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_upload_helper.py index 064b636..00d5bf1 100644 --- a/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_upload_helper.py +++ b/azure/multiapi/storagev2/filedatalake/v2020_02_10/aio/_upload_helper.py @@ -10,7 +10,7 @@ from .._shared.response_handlers import return_response_headers from .._shared.uploads_async import ( upload_data_chunks, - DataLakeFileChunkUploader) + DataLakeFileChunkUploader, upload_substream_blocks) def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument @@ -29,6 +29,7 @@ async def upload_datalake_file( # pylint: disable=unused-argument overwrite=None, validate_content=None, max_concurrency=None, + file_settings=None, **kwargs): try: if length == 0: @@ -65,15 +66,32 @@ async def upload_datalake_file( # pylint: disable=unused-argument modified_access_conditions.if_modified_since = None modified_access_conditions.if_unmodified_since = None - await upload_data_chunks( - service=client, - uploader_class=DataLakeFileChunkUploader, - total_size=length, - chunk_size=chunk_size, - stream=stream, - max_concurrency=max_concurrency, - validate_content=validate_content, - **kwargs) + use_original_upload_path = file_settings.use_byte_buffer or \ + validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \ + hasattr(stream, 'seekable') and not stream.seekable() or \ + not hasattr(stream, 'seek') or not hasattr(stream, 'tell') + + if use_original_upload_path: + await upload_data_chunks( + service=client, + uploader_class=DataLakeFileChunkUploader, + total_size=length, + chunk_size=chunk_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + **kwargs) + else: + await upload_substream_blocks( + service=client, + uploader_class=DataLakeFileChunkUploader, + total_size=length, + chunk_size=chunk_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + **kwargs + ) return await client.flush_data(position=length, path_http_headers=path_http_headers, diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/__init__.py new file mode 100644 index 0000000..99d2ef7 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/__init__.py @@ -0,0 +1,105 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+
+from ._download import StorageStreamDownloader
+from ._data_lake_file_client import DataLakeFileClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._file_system_client import FileSystemClient
+from ._data_lake_service_client import DataLakeServiceClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._models import (
+    LocationMode,
+    ResourceTypes,
+    FileSystemProperties,
+    FileSystemPropertiesPaged,
+    DirectoryProperties,
+    FileProperties,
+    PathProperties,
+    LeaseProperties,
+    ContentSettings,
+    AccountSasPermissions,
+    FileSystemSasPermissions,
+    DirectorySasPermissions,
+    FileSasPermissions,
+    UserDelegationKey,
+    PublicAccess,
+    AccessPolicy,
+    DelimitedTextDialect,
+    DelimitedJsonDialect,
+    ArrowDialect,
+    ArrowType,
+    QuickQueryDialect,
+    DataLakeFileQueryError,
+    AccessControlChangeResult,
+    AccessControlChangeCounters,
+    AccessControlChangeFailure,
+    AccessControlChanges,
+    AnalyticsLogging,
+    Metrics,
+    RetentionPolicy,
+    StaticWebsite,
+    CorsRule,
+    DeletedPathProperties
+)
+
+from ._shared_access_signature import generate_account_sas, generate_file_system_sas, generate_directory_sas, \
+    generate_file_sas
+
+from ._shared.policies import ExponentialRetry, LinearRetry
+from ._shared.models import StorageErrorCode
+from ._version import VERSION
+
+__version__ = VERSION
+
+__all__ = [
+    'DataLakeServiceClient',
+    'FileSystemClient',
+    'DataLakeFileClient',
+    'DataLakeDirectoryClient',
+    'DataLakeLeaseClient',
+    'ExponentialRetry',
+    'LinearRetry',
+    'LocationMode',
+    'PublicAccess',
+    'AccessPolicy',
+    'ResourceTypes',
+    'StorageErrorCode',
+    'UserDelegationKey',
+    'FileSystemProperties',
+    'FileSystemPropertiesPaged',
+    'DirectoryProperties',
+    'FileProperties',
+    'PathProperties',
+    'LeaseProperties',
+    'ContentSettings',
+    'AccessControlChangeResult',
+    'AccessControlChangeCounters',
+    'AccessControlChangeFailure',
+    'AccessControlChanges',
+    'AccountSasPermissions',
+    'FileSystemSasPermissions',
+    'DirectorySasPermissions',
+    'FileSasPermissions',
+    'generate_account_sas',
+    'generate_file_system_sas',
+    'generate_directory_sas',
+    'generate_file_sas',
+    'VERSION',
+    'StorageStreamDownloader',
+    'DelimitedTextDialect',
+    'DelimitedJsonDialect',
+    'DataLakeFileQueryError',
+    'ArrowDialect',
+    'ArrowType',
+    'QuickQueryDialect',
+    'AnalyticsLogging',
+    'Metrics',
+    'RetentionPolicy',
+    'StaticWebsite',
+    'CorsRule',
+    'DeletedPathProperties'
+]
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_directory_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_directory_client.py
new file mode 100644
index 0000000..042fa05
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_directory_client.py
@@ -0,0 +1,565 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
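The new versioned package imports side by side with earlier API versions; a minimal hedged sketch (placeholder account URL and key):

from azure.multiapi.storagev2.filedatalake.v2020_06_12 import DataLakeServiceClient

service = DataLakeServiceClient(
    account_url="https://<account>.dfs.core.windows.net",
    credential="<account-key>",
)
print(service.api_version)  # expected to report 2020-06-12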
+# --------------------------------------------------------------------------
+from typing import Any
+
+try:
+    from urllib.parse import quote, unquote
+except ImportError:
+    from urllib2 import quote, unquote  # type: ignore
+from azure.core.pipeline import Pipeline
+from ._deserialize import deserialize_dir_properties
+from ._shared.base_client import TransportWrapper, parse_connection_str
+from ._data_lake_file_client import DataLakeFileClient
+from ._models import DirectoryProperties, FileProperties
+from ._path_client import PathClient
+
+
+class DataLakeDirectoryClient(PathClient):
+    """A client to interact with the DataLake directory, even if the directory may not yet exist.
+
+    For operations relating to a specific subdirectory or file under the directory, a directory client or file client
+    can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param directory_name:
+        The whole path of the directory, e.g. {directory under file system}/{directory to interact with}
+    :type directory_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential from azure.core.credentials, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+            :start-after: [START instantiate_directory_client_from_conn_str]
+            :end-before: [END instantiate_directory_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeDirectoryClient from connection string.
+    """
+    def __init__(
+        self, account_url,  # type: str
+        file_system_name,  # type: str
+        directory_name,  # type: str
+        credential=None,  # type: Optional[Any]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, path_name=directory_name,
+                                                      credential=credential, **kwargs)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            file_system_name,  # type: str
+            directory_name,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):  # type: (...) -> DataLakeDirectoryClient
+        """
+        Create DataLakeDirectoryClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name:
+            The name of file system to interact with.
+        :type file_system_name: str
+        :param directory_name:
+            The name of directory to interact with. The directory is under file system.
+        :type directory_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values.
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, directory_name=directory_name,
+            credential=credential, **kwargs)
+
+    def create_directory(self, metadata=None,  # type: Optional[Dict[str, str]]
+                         **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new directory.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: response dict (Etag and last modified).
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START create_directory]
+                :end-before: [END create_directory]
+                :language: python
+                :dedent: 8
+                :caption: Create directory.
+        """
+        return self._create('directory', metadata=metadata, **kwargs)
+
+    def delete_directory(self, **kwargs):
+        # type: (...) -> None
+        """
+        Marks the specified directory for deletion.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START delete_directory]
+                :end-before: [END delete_directory]
+                :language: python
+                :dedent: 4
+                :caption: Delete directory.
+        """
+        return self._delete(recursive=True, **kwargs)
+
+    def get_directory_properties(self, **kwargs):
+        # type: (**Any) -> DirectoryProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the directory. It does not return the content of the directory.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: DirectoryProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory.py
+                :start-after: [START get_directory_properties]
+                :end-before: [END get_directory_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file/directory.
+        """
+        return self._get_path_properties(cls=deserialize_dir_properties, **kwargs)  # pylint: disable=protected-access
+
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a directory exists and returns False otherwise.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: boolean
+        """
+        return self._exists(**kwargs)
+
+    def rename_directory(self, new_name, **kwargs):
+        # type: (str, **Any) -> DataLakeDirectoryClient
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            The new directory name the user wants to rename to.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
+        :keyword source_lease:
+            A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*).
Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_directory.py + :start-after: [START rename_directory] + :end-before: [END rename_directory] + :language: python + :dedent: 4 + :caption: Rename the source directory. + """ + new_name = new_name.strip('/') + new_file_system = new_name.split('/')[0] + new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') + new_path = new_path_and_token[0] + try: + new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') + except IndexError: + if not self._raw_credential and new_file_system != self.file_system_name: + raise ValueError("please provide the sas token for the new file") + if not self._raw_credential and new_file_system == self.file_system_name: + new_dir_sas = self._query_str.strip('?') + + new_directory_client = DataLakeDirectoryClient( + "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, + credential=self._raw_credential or new_dir_sas, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + new_directory_client._rename_path( # pylint: disable=protected-access + '/{}/{}{}'.format(quote(unquote(self.file_system_name)), + quote(unquote(self.path_name)), + self._query_str), + **kwargs) + return new_directory_client + + def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Create a subdirectory and return the subdirectory client to be interacted with. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. 
Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient for the subdirectory. + """ + subdir = self.get_sub_directory_client(sub_directory) + subdir.create_directory(metadata=metadata, **kwargs) + return subdir + + def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Marks the specified subdirectory for deletion. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :return: DataLakeDirectoryClient for the subdirectory + """ + subdir = self.get_sub_directory_client(sub_directory) + subdir.delete_directory(**kwargs) + return subdir + + def create_file(self, file, # type: Union[FileProperties, str] + **kwargs): + # type: (...) -> DataLakeFileClient + """ + Create a new file and return the file client to be interacted with. + + :param file: + The file with which to interact. This can either be the name of the file, + or an instance of FileProperties. + :type file: str or ~azure.storage.filedatalake.FileProperties + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeFileClient + """ + file_client = self.get_file_client(file) + file_client.create_file(**kwargs) + return file_client + + def get_file_client(self, file # type: Union[FileProperties, str] + ): + # type: (...) -> DataLakeFileClient + """Get a client to interact with the specified file. + + The file need not already exist. + + :param file: + The file with which to interact. 
+            This can either be the name of the file,
+            or an instance of FileProperties, e.g. directory/subdirectory/file
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+        """
+        try:
+            file_path = file.get('name')
+        except AttributeError:
+            file_path = self.path_name + '/' + str(file)
+
+        # Wrap the current transport so the child client shares the parent's
+        # connection without taking ownership of it.
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+            require_encryption=self.require_encryption,
+            key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
+
+    def get_sub_directory_client(self, sub_directory  # type: Union[DirectoryProperties, str]
+                                 ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified subdirectory of the current directory.
+
+        The subdirectory need not already exist.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        try:
+            subdir_path = sub_directory.get('name')
+        except AttributeError:
+            subdir_path = self.path_name + '/' + str(sub_directory)
+
+        # Same transport-sharing pattern as get_file_client above.
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(
+            self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+            require_encryption=self.require_encryption,
+            key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_file_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_file_client.py
new file mode 100644
index 0000000..fe074b3
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_file_client.py
@@ -0,0 +1,781 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from io import BytesIO
+from typing import Any, AnyStr, Dict, IO, Iterable, Optional, Union  # pylint: disable=unused-import
+
+try:
+    from urllib.parse import quote, unquote
+except ImportError:
+    from urllib2 import quote, unquote  # type: ignore
+
+import six
+
+from azure.core.exceptions import HttpResponseError
+from ._quick_query_helper import DataLakeFileQueryReader
+from ._shared.base_client import parse_connection_str
+from ._shared.request_handlers import get_length, read_length
+from ._shared.response_handlers import return_response_headers
+from ._shared.uploads import IterStreamer
+from ._upload_helper import upload_datalake_file
+from ._download import StorageStreamDownloader
+from ._path_client import PathClient
+from ._serialize import get_mod_conditions, get_path_http_headers, get_access_conditions, add_metadata_headers, \
+    convert_datetime_to_rfc1123
+from ._deserialize import process_storage_error, deserialize_file_properties
+from ._models import FileProperties, DataLakeFileQueryError
+
+
+class DataLakeFileClient(PathClient):
+    """A client to interact with the DataLake file, even if the file may not yet exist.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param file_path:
+        The whole file path, used to interact with a specific file,
+        e.g. "{directory}/{subdirectory}/{file}"
+    :type file_path: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential from azure.core.credentials, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client.py
+            :start-after: [START instantiate_file_client_from_conn_str]
+            :end-before: [END instantiate_file_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeFileClient from connection string.
+    """
+    def __init__(
+        self, account_url,  # type: str
+        file_system_name,  # type: str
+        file_path,  # type: str
+        credential=None,  # type: Optional[Any]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
+                                                 credential=credential, **kwargs)
+
+    @classmethod
+    def from_connection_string(
+            cls, conn_str,  # type: str
+            file_system_name,  # type: str
+            file_path,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):  # type: (...) -> DataLakeFileClient
+        """
+        Create DataLakeFileClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param file_system_name:
+            The name of the file system to interact with.
+        :type file_system_name: str
+        :param file_path:
+            The whole file path to interact with, e.g. "{directory}/{subdirectory}/{file}".
+        :type file_path: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of a AzureSasCredential from azure.core.credentials, an account shared access
+            key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+        :return: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
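+
+        A minimal usage sketch (the connection string and names shown are
+        illustrative placeholders, not values taken from the samples):
+
+        .. code-block:: python
+
+            file_client = DataLakeFileClient.from_connection_string(
+                "<my_connection_string>",
+                file_system_name="myfilesystem",
+                file_path="mydir/myfile.txt")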
+        """
+        account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+        return cls(
+            account_url, file_system_name=file_system_name, file_path=file_path,
+            credential=credential, **kwargs)
+
+    def create_file(self, content_settings=None,  # type: Optional[ContentSettings]
+                    metadata=None,  # type: Optional[Dict[str, str]]
+                    **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new file.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+ :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: response dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download.py + :start-after: [START create_file] + :end-before: [END create_file] + :language: python + :dedent: 4 + :caption: Create file. + """ + return self._create('file', content_settings=content_settings, metadata=metadata, **kwargs) + + def delete_file(self, **kwargs): + # type: (...) -> None + """ + Marks the specified file for deletion. + + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download.py + :start-after: [START delete_file] + :end-before: [END delete_file] + :language: python + :dedent: 4 + :caption: Delete file. + """ + return self._delete(**kwargs) + + def get_file_properties(self, **kwargs): + # type: (**Any) -> FileProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the file. It does not return the content of the file. + + :keyword lease: + Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: FileProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download.py + :start-after: [START get_file_properties] + :end-before: [END get_file_properties] + :language: python + :dedent: 4 + :caption: Getting the properties for a file. + """ + return self._get_path_properties(cls=deserialize_file_properties, **kwargs) # pylint: disable=protected-access + + def set_file_expiry(self, expiry_options, # type: str + expires_on=None, # type: Optional[Union[datetime, int]] + **kwargs): + # type: (str, Optional[Union[datetime, int]], **Any) -> None + """Sets the time a file will expire and be deleted. + + :param str expiry_options: + Required. Indicates mode of the expiry time. + Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' + :param datetime or int expires_on: + The time to set the file to expiry. + When expiry_options is RelativeTo*, expires_on should be an int in milliseconds. + If the type of expires_on is datetime, it should be in UTC time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + try: + expires_on = convert_datetime_to_rfc1123(expires_on) + except AttributeError: + expires_on = str(expires_on) + self._datalake_client_for_blob_operation.path \ + .set_expiry(expiry_options, expires_on=expires_on, **kwargs) # pylint: disable=protected-access + + def _upload_options( # pylint:disable=too-many-statements + self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Any] + + encoding = kwargs.pop('encoding', 'UTF-8') + if isinstance(data, six.text_type): + data = data.encode(encoding) # type: ignore + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, 'read'): + stream = data + elif hasattr(data, '__iter__'): + stream = IterStreamer(data, encoding=encoding) + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + + validate_content = kwargs.pop('validate_content', False) + content_settings = kwargs.pop('content_settings', None) + metadata = kwargs.pop('metadata', None) + max_concurrency = kwargs.pop('max_concurrency', 1) + + kwargs['properties'] = add_metadata_headers(metadata) + kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) + kwargs['modified_access_conditions'] = get_mod_conditions(kwargs) + + if content_settings: + kwargs['path_http_headers'] = get_path_http_headers(content_settings) + + kwargs['stream'] = stream + kwargs['length'] = length + kwargs['validate_content'] = validate_content + kwargs['max_concurrency'] = max_concurrency + kwargs['client'] = self._client.path + kwargs['file_settings'] = self._config + + return kwargs + + def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + overwrite=False, # type: Optional[bool] + **kwargs): + # type: (...) -> Dict[str, Any] + """ + Upload data to a file. + + :param data: Content to be uploaded to file + :param int length: Size of the data in bytes. 
+        :param bool overwrite: Whether to overwrite an existing file or not.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the blob as metadata.
+        :paramtype metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
+            Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions: Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :keyword int chunk_size:
+            The maximum chunk size for uploading a file in chunks.
+            Defaults to 100*1024*1024, or 100MB.
+        :return: response dict (Etag and last modified).
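+
+        .. admonition:: Example:
+
+            A minimal usage sketch (``file_client`` and the local path are
+            illustrative, not taken from the shipped samples):
+
+            .. code-block:: python
+
+                with open("data/report.csv", "rb") as source:
+                    file_client.upload_data(source, overwrite=True)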
+ """ + options = self._upload_options( + data, + length=length, + overwrite=overwrite, + **kwargs) + return upload_datalake_file(**options) + + @staticmethod + def _append_data_options(data, offset, length=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] + + if isinstance(data, six.text_type): + data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore + if length is None: + length = get_length(data) + if length is None: + length, data = read_length(data) + if isinstance(data, bytes): + data = data[:length] + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + + options = { + 'body': data, + 'position': offset, + 'content_length': length, + 'lease_access_conditions': access_conditions, + 'validate_content': kwargs.pop('validate_content', False), + 'timeout': kwargs.pop('timeout', None), + 'cls': return_response_headers} + options.update(kwargs) + return options + + def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + offset, # type: int + length=None, # type: Optional[int] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """Append data to the file. + + :param data: Content to be appended to file + :param offset: start position of the data to be appended to. + :param length: Size of the data in bytes. + :keyword bool validate_content: + If true, calculates an MD5 hash of the block content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. Note that this MD5 hash is not stored with the + file. + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :return: dict of the response header + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download.py + :start-after: [START append_data] + :end-before: [END append_data] + :language: python + :dedent: 4 + :caption: Append data to the file. + """ + options = self._append_data_options( + data, + offset, + length=length, + **kwargs) + try: + return self._client.path.append_data(**options) + except HttpResponseError as error: + process_storage_error(error) + + @staticmethod + def _flush_data_options(offset, content_settings=None, retain_uncommitted_data=False, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_mod_conditions(kwargs) + + path_http_headers = None + if content_settings: + path_http_headers = get_path_http_headers(content_settings) + + options = { + 'position': offset, + 'content_length': 0, + 'path_http_headers': path_http_headers, + 'retain_uncommitted_data': retain_uncommitted_data, + 'close': kwargs.pop('close', False), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_response_headers} + options.update(kwargs) + return options + + def flush_data(self, offset, # type: int + retain_uncommitted_data=False, # type: Optional[bool] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """ Commit the previous appended data. 
+
+        :param offset: offset is equal to the length of the file after committing the
+            previously appended data.
+        :param bool retain_uncommitted_data: Valid only for flush operations. If
+            "true", uncommitted data is retained after the flush operation
+            completes; otherwise, the uncommitted data is deleted after the flush
+            operation. The default is false. Data at offsets less than the
+            specified position are written to the file when flush succeeds, but
+            this optional parameter allows data after the flush position to be
+            retained for a future flush operation.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword bool close: Azure Storage Events allow applications to receive
+            notifications when files change. When Azure Storage Events are
+            enabled, a file changed event is raised. This event has a property
+            indicating whether this is the final change to distinguish the
+            difference between an intermediate flush to a file stream and the
+            final close of a file stream. The close query parameter is valid only
+            when the action is "flush" and change notifications are enabled. If
+            the value of close is "true" and the flush operation completes
+            successfully, the service raises a file change notification with a
+            property indicating that this is the final update (the file stream has
+            been closed). If "false" a change notification is raised indicating
+            the file has changed. The default is false. This query parameter is
+            set to true by the Hadoop ABFS driver to indicate that the file stream
+            has been closed.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :return: response header in dict
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START upload_file_to_file_system]
+                :end-before: [END upload_file_to_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Commit the previous appended data.
+        """
+        options = self._flush_data_options(
+            offset,
+            retain_uncommitted_data=retain_uncommitted_data, **kwargs)
+        try:
+            return self._client.path.flush_data(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def download_file(self, offset=None, length=None, **kwargs):
+        # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+        """Downloads a file to the StorageStreamDownloader. The readall() method must
+        be used to read all the content, or readinto() must be used to download the file into
+        a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the file.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword lease:
+            If specified, download only succeeds if the file's lease is active
+            and matches this ID. Required if the file has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds. This method may make
+            multiple calls to the Azure service and the timeout will apply to
+            each call individually.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.filedatalake.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download.py
+                :start-after: [START read_file]
+                :end-before: [END read_file]
+                :language: python
+                :dedent: 4
+                :caption: Return the downloaded data.
+        """
+        downloader = self._blob_client.download_blob(offset=offset, length=length, **kwargs)
+        return StorageStreamDownloader(downloader)
+
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a file exists and returns False otherwise.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: boolean
+        """
+        return self._exists(**kwargs)
+
+    def rename_file(self, new_name, **kwargs):
+        # type: (str, **Any) -> DataLakeFileClient
+        """
+        Rename the source file.
+
+        :param str new_name: The new file name the user wants to rename to.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease: A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+ :type lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: the renamed file client + :rtype: DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download.py + :start-after: [START rename_file] + :end-before: [END rename_file] + :language: python + :dedent: 4 + :caption: Rename the source file. 
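+
+        A hedged inline sketch of the expected ``new_name`` format (the names
+        are illustrative):
+
+        .. code-block:: python
+
+            # new_name must include the file system name
+            renamed_client = file_client.rename_file("myfilesystem/mydir/newfile.txt")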
+ """ + new_name = new_name.strip('/') + new_file_system = new_name.split('/')[0] + new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') + new_path = new_path_and_token[0] + try: + new_file_sas = new_path_and_token[1] or self._query_str.strip('?') + except IndexError: + if not self._raw_credential and new_file_system != self.file_system_name: + raise ValueError("please provide the sas token for the new file") + if not self._raw_credential and new_file_system == self.file_system_name: + new_file_sas = self._query_str.strip('?') + + new_file_client = DataLakeFileClient( + "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, + credential=self._raw_credential or new_file_sas, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function + ) + new_file_client._rename_path( # pylint: disable=protected-access + '/{}/{}{}'.format(quote(unquote(self.file_system_name)), + quote(unquote(self.path_name)), + self._query_str), + **kwargs) + return new_file_client + + def query_file(self, query_expression, **kwargs): + # type: (str, **Any) -> DataLakeFileQueryReader + """ + Enables users to select/project on datalake file data by providing simple query expressions. + This operations returns a DataLakeFileQueryReader, users need to use readall() or readinto() to get query data. + + :param str query_expression: + Required. a query statement. + eg. Select * from DataLakeStorage + :keyword Callable[~azure.storage.filedatalake.DataLakeFileQueryError] on_error: + A function to be called on any processing errors returned by the service. + :keyword file_format: + Optional. Defines the serialization of the data currently stored in the file. The default is to + treat the file data as CSV data formatted in the default dialect. This can be overridden with + a custom DelimitedTextDialect, or DelimitedJsonDialect or "ParquetDialect" (passed as a string or enum). + These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string. + :paramtype file_format: + ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect or + ~azure.storage.filedatalake.QuickQueryDialect or str + :keyword output_format: + Optional. Defines the output serialization for the data stream. By default the data will be returned + as it is represented in the file. By providing an output format, + the file data will be reformatted according to that profile. + This value can be a DelimitedTextDialect or a DelimitedJsonDialect or ArrowDialect. + These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string. + :paramtype output_format: + ~azure.storage.filedatalake.DelimitedTextDialect or ~azure.storage.filedatalake.DelimitedJsonDialect + or list[~azure.storage.filedatalake.ArrowDialect] or ~azure.storage.filedatalake.QuickQueryDialect or str + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A streaming object (DataLakeFileQueryReader) + :rtype: ~azure.storage.filedatalake.DataLakeFileQueryReader + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_query.py + :start-after: [START query] + :end-before: [END query] + :language: python + :dedent: 4 + :caption: select/project on datalake file data by providing simple query expressions. + """ + query_expression = query_expression.replace("from DataLakeStorage", "from BlobStorage") + blob_quick_query_reader = self._blob_client.query_blob(query_expression, + blob_format=kwargs.pop('file_format', None), + error_cls=DataLakeFileQueryError, + **kwargs) + return DataLakeFileQueryReader(blob_quick_query_reader) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_lease.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_lease.py new file mode 100644 index 0000000..ccdf525 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_lease.py @@ -0,0 +1,245 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import uuid + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, + TypeVar, TYPE_CHECKING +) +from azure.multiapi.storagev2.blob.v2020_06_12 import BlobLeaseClient + + +if TYPE_CHECKING: + from datetime import datetime + FileSystemClient = TypeVar("FileSystemClient") + DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient") + DataLakeFileClient = TypeVar("DataLakeFileClient") + + +class DataLakeLeaseClient(object): + """Creates a new DataLakeLeaseClient. + + This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the file system, directory, or file to lease. 
+ :type client: ~azure.storage.filedatalake.FileSystemClient or
+ ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient
+ :param str lease_id:
+ A string representing the lease ID of an existing lease. This value does not
+ need to be specified in order to acquire a new lease, or break one.
+ """
+ def __init__(
+ self, client, lease_id=None
+ ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+ # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None
+ self.id = lease_id or str(uuid.uuid4())
+ self.last_modified = None
+ self.etag = None
+
+ if hasattr(client, '_blob_client'):
+ _client = client._blob_client # type: ignore # pylint: disable=protected-access
+ elif hasattr(client, '_container_client'):
+ _client = client._container_client # type: ignore # pylint: disable=protected-access
+ else:
+ raise TypeError("Lease must use any of FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.")
+
+ self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.release()
+
+ def acquire(self, lease_duration=-1, **kwargs):
+ # type: (int, **Any) -> None
+ """Requests a new lease.
+
+ If the file/file system does not have an active lease, the DataLake service creates a
+ lease on the file/file system and returns a new lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs)
+ self._update_lease_client_attributes()
+
+ def renew(self, **kwargs):
+ # type: (Any) -> None
+ """Renews the lease.
+
+ The lease can be renewed if the lease ID specified in the
+ lease client matches that associated with the file system or file. Note that
+ the lease may be renewed even if it has expired as long as the file system
+ or file has not been leased again since the expiration of that lease. When you
+ renew a lease, the lease duration clock resets.
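+
+ A minimal sketch of the lease lifecycle (assuming ``file_system_client`` is an
+ existing ``FileSystemClient``; the duration is illustrative)::
+
+ lease = file_system_client.acquire_lease(lease_duration=15)
+ lease.renew() # restarts the 15-second duration clock
+ lease.release()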
+ + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + self._blob_lease_client.renew(**kwargs) + self._update_lease_client_attributes() + + def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the file system or file. Releasing the lease allows another client + to immediately acquire the lease for the file system or file as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + self._blob_lease_client.release(**kwargs) + self._update_lease_client_attributes() + + def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
+ :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) + self._update_lease_client_attributes() + + def break_lease(self, lease_break_period=None, **kwargs): + # type: (Optional[int], Any) -> int + """Break the lease, if the file system or file has an active lease. + + Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. When a lease + is broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the file system or file. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :param int lease_break_period: + This is the proposed duration of seconds that the lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the lease. If longer, the time remaining on the lease is used. + A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. 
+ :rtype: int
+ """
+ return self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
+
+ def _update_lease_client_attributes(self):
+ self.id = self._blob_lease_client.id # type: str
+ self.last_modified = self._blob_lease_client.last_modified # type: datetime
+ self.etag = self._blob_lease_client.etag # type: str
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_service_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_service_client.py
new file mode 100644
index 0000000..d46af1f
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_data_lake_service_client.py
@@ -0,0 +1,560 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import Optional, Dict, Any
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse # type: ignore
+
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import Pipeline
+
+from azure.multiapi.storagev2.blob.v2020_06_12 import BlobServiceClient
+from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str
+from ._deserialize import get_datalake_service_properties
+from ._file_system_client import FileSystemClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._data_lake_file_client import DataLakeFileClient
+from ._models import UserDelegationKey, FileSystemPropertiesPaged, LocationMode
+from ._serialize import convert_dfs_url_to_blob_url, get_api_version
+from ._generated import AzureDataLakeStorageRESTAPI
+
+
+class DataLakeServiceClient(StorageAccountHostsMixin):
+ """A client to interact with the DataLake Service at the account level.
+
+ This client provides operations to retrieve and configure the account properties
+ as well as list, create and delete file systems within the account.
+ For operations relating to a specific file system, directory or file, clients for those entities
+ can also be retrieved using the `get_client` functions.
+
+ :ivar str url:
+ The full endpoint URL to the datalake service endpoint.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url:
+ The URL to the DataLake storage account. Any other entities included
+ in the URL path (e.g. file system or file) will be discarded. This URL can be optionally
+ authenticated with a SAS token.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+ an instance of a AzureSasCredential from azure.core.credentials, an account
+ shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+ .. admonition:: Example:
+
+ ..
literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START create_datalake_service_client]
+ :end-before: [END create_datalake_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Creating the DataLakeServiceClient from connection string.
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START create_datalake_service_client_oauth]
+ :end-before: [END create_datalake_service_client_oauth]
+ :language: python
+ :dedent: 8
+ :caption: Creating the DataLakeServiceClient with Azure Identity credentials.
+ """
+
+ def __init__(
+ self, account_url, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ try:
+ if not account_url.lower().startswith('http'):
+ account_url = "https://" + account_url
+ except AttributeError:
+ raise ValueError("Account URL must be a string.")
+ parsed_url = urlparse(account_url.rstrip('/'))
+ if not parsed_url.netloc:
+ raise ValueError("Invalid URL: {}".format(account_url))
+
+ blob_account_url = convert_dfs_url_to_blob_url(account_url)
+ self._blob_account_url = blob_account_url
+ self._blob_service_client = BlobServiceClient(blob_account_url, credential, **kwargs)
+ self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access
+
+ _, sas_token = parse_query(parsed_url.query)
+ self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+ super(DataLakeServiceClient, self).__init__(parsed_url, service='dfs',
+ credential=self._raw_credential, **kwargs)
+ # ADLS doesn't support secondary endpoint, make sure it's empty
+ self._hosts[LocationMode.SECONDARY] = ""
+
+ self._client = AzureDataLakeStorageRESTAPI(self.url, pipeline=self._pipeline)
+ self._client._config.version = get_api_version(kwargs) #pylint: disable=protected-access
+
+ def __enter__(self):
+ self._blob_service_client.__enter__()
+ return self
+
+ def __exit__(self, *args):
+ self._blob_service_client.close()
+
+ def close(self):
+ # type: () -> None
+ """ This method is to close the sockets opened by the client.
+ It need not be used when the client is used as a context manager.
+ """
+ self._blob_service_client.close()
+
+ def _format_url(self, hostname):
+ """Format the endpoint URL according to the hostname.
+ """
+ formatted_url = "{}://{}/{}".format(self.scheme, hostname, self._query_str)
+ return formatted_url
+
+ @classmethod
+ def from_connection_string(
+ cls, conn_str, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ): # type: (...) -> DataLakeServiceClient
+ """
+ Create DataLakeServiceClient from a Connection String.
+
+ :param str conn_str:
+ A connection string to an Azure Storage account.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token, or the connection string already has shared
+ access key values. The value can be a SAS token string,
+ an instance of a AzureSasCredential from azure.core.credentials, an account shared access
+ key, or an instance of a TokenCredentials class from azure.identity.
+ Credentials provided here will take precedence over those in the connection string.
+ :return: A DataLakeServiceClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeServiceClient
+
+ .. admonition:: Example:
+
+ ..
literalinclude:: ../samples/datalake_samples_file_system.py
+ :start-after: [START create_data_lake_service_client_from_conn_str]
+ :end-before: [END create_data_lake_service_client_from_conn_str]
+ :language: python
+ :dedent: 8
+ :caption: Creating the DataLakeServiceClient from a connection string.
+ """
+ account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs')
+ return cls(account_url, credential=credential, **kwargs)
+
+ def get_user_delegation_key(self, key_start_time, # type: datetime
+ key_expiry_time, # type: datetime
+ **kwargs # type: Any
+ ):
+ # type: (...) -> UserDelegationKey
+ """
+ Obtain a user delegation key for the purpose of signing SAS tokens.
+ A token credential must be present on the service object for this request to succeed.
+
+ :param ~datetime.datetime key_start_time:
+ A DateTime value. Indicates when the key becomes valid.
+ :param ~datetime.datetime key_expiry_time:
+ A DateTime value. Indicates when the key stops being valid.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: The user delegation key.
+ :rtype: ~azure.storage.filedatalake.UserDelegationKey
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START get_user_delegation_key]
+ :end-before: [END get_user_delegation_key]
+ :language: python
+ :dedent: 8
+ :caption: Get user delegation key from datalake service client.
+ """
+ delegation_key = self._blob_service_client.get_user_delegation_key(key_start_time=key_start_time,
+ key_expiry_time=key_expiry_time,
+ **kwargs) # pylint: disable=protected-access
+ return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access
+
+ def list_file_systems(self, name_starts_with=None, # type: Optional[str]
+ include_metadata=None, # type: Optional[bool]
+ **kwargs):
+ # type: (...) -> ItemPaged[FileSystemProperties]
+ """Returns a generator to list the file systems under the specified account.
+
+ The generator will lazily follow the continuation tokens returned by
+ the service and stop when all file systems have been returned.
+
+ :param str name_starts_with:
+ Filters the results to return only file systems whose names
+ begin with the specified prefix.
+ :param bool include_metadata:
+ Specifies that file system metadata be returned in the response.
+ The default value is `False`.
+ :keyword int results_per_page:
+ The maximum number of file system names to retrieve per API
+ call. If the request does not specify one, the server will return up to 5,000 items per page.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword bool include_deleted:
+ Specifies that deleted file systems be returned in the response. This is only for accounts
+ with soft delete for file systems enabled. The default value is `False`.
+ .. versionadded:: 12.3.0
+ :returns: An iterable (auto-paging) of FileSystemProperties.
+ :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START list_file_systems]
+ :end-before: [END list_file_systems]
+ :language: python
+ :dedent: 8
+ :caption: Listing the file systems in the datalake service.
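+
+ A minimal sketch (assuming ``service_client`` is an existing
+ ``DataLakeServiceClient``; the name prefix is illustrative)::
+
+ for file_system in service_client.list_file_systems(name_starts_with="logs-"):
+ print(file_system.name)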
+ """ + item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, + include_metadata=include_metadata, + **kwargs) # pylint: disable=protected-access + item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access + return item_paged + + def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] + metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[PublicAccess] + **kwargs): + # type: (...) -> FileSystemClient + """Creates a new file system under the specified account. + + If the file system with the same name already exists, a ResourceExistsError will + be raised. This method returns a client with which to interact with the newly + created file system. + + :param str file_system: + The name of the file system to create. + :param metadata: + A dict with name-value pairs to associate with the + file system as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + Possible values include: file system, file. + :type public_access: ~azure.storage.filedatalake.PublicAccess + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.filedatalake.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START create_file_system_from_service_client] + :end-before: [END create_file_system_from_service_client] + :language: python + :dedent: 8 + :caption: Creating a file system in the datalake service. + """ + file_system_client = self.get_file_system_client(file_system) + file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) + return file_system_client + + def _rename_file_system(self, name, new_name, **kwargs): + # type: (str, str, **Any) -> FileSystemClient + """Renames a filesystem. + + Operation is successful only if the source filesystem exists. + + :param str name: + The name of the filesystem to rename. + :param str new_name: + The new filesystem name the user wants to rename to. + :keyword lease: + Specify this to perform only if the lease ID given + matches the active lease ID of the source filesystem. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.filedatalake.FileSystemClient + """ + self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access + renamed_file_system = self.get_file_system_client(new_name) + return renamed_file_system + + def undelete_file_system(self, name, deleted_version, **kwargs): + # type: (str, str, **Any) -> FileSystemClient + """Restores soft-deleted filesystem. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + .. versionadded:: 12.3.0 + This operation was introduced in API version '2019-12-12'. + + :param str name: + Specifies the name of the deleted filesystem to restore. + :param str deleted_version: + Specifies the version of the deleted filesystem to restore. + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+ """
+ new_name = kwargs.pop('new_name', None)
+ file_system = self.get_file_system_client(new_name or name)
+ self._blob_service_client.undelete_container(
+ name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access
+ return file_system
+
+ def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str]
+ **kwargs):
+ # type: (...) -> FileSystemClient
+ """Marks the specified file system for deletion.
+
+ The file system and any files contained within it are later deleted during garbage collection.
+ If the file system is not found, a ResourceNotFoundError will be raised.
+
+ :param file_system:
+ The file system to delete. This can either be the name of the file system,
+ or an instance of FileSystemProperties.
+ :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+ :keyword lease:
+ If specified, delete_file_system only succeeds if the
+ file system's lease is active and matches this ID.
+ Required if the file system has an active lease.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A FileSystemClient for the deleted file system.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START delete_file_system_from_service_client]
+ :end-before: [END delete_file_system_from_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Deleting a file system in the datalake service.
+ """
+ file_system_client = self.get_file_system_client(file_system)
+ file_system_client.delete_file_system(**kwargs)
+ return file_system_client
+
+ def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str]
+ ):
+ # type: (...) -> FileSystemClient
+ """Get a client to interact with the specified file system.
+
+ The file system need not already exist.
+
+ :param file_system:
+ The file system. This can either be the name of the file system,
+ or an instance of FileSystemProperties.
+ :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties
+ :returns: A FileSystemClient.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient
+
+ .. admonition:: Example:
+
+ ..
literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START create_file_system_client_from_service] + :end-before: [END create_file_system_client_from_service] + :language: python + :dedent: 8 + :caption: Getting the file system client to interact with a specific file system. + """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, + api_version=self.api_version, + _configuration=self._config, + _pipeline=_pipeline, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + + def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] + directory # type: Union[DirectoryProperties, str] + ): + # type: (...) -> DataLakeDirectoryClient + """Get a client to interact with the specified directory. + + The directory need not already exist. + + :param file_system: + The file system that the directory is in. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :param directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :returns: A DataLakeDirectoryClient. + :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service.py + :start-after: [START get_directory_client_from_service_client] + :end-before: [END get_directory_client_from_service_client] + :language: python + :dedent: 8 + :caption: Getting the directory client to interact with a specific directory. + """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + try: + directory_name = directory.name + except AttributeError: + directory_name = directory + + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, + credential=self._raw_credential, + api_version=self.api_version, + _configuration=self._config, _pipeline=_pipeline, + _hosts=self._hosts, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function + ) + + def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] + file_path # type: Union[FileProperties, str] + ): + # type: (...) -> DataLakeFileClient + """Get a client to interact with the specified file. + + The file need not already exist. + + :param file_system: + The file system that the file is in. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :param file_path: + The file with which to interact. 
This can either be the full path of the file (from the root directory),
+ e.g. directory/subdirectory/file, or an instance of FileProperties.
+ :type file_path: str or ~azure.storage.filedatalake.FileProperties
+ :returns: A DataLakeFileClient.
+ :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_service.py
+ :start-after: [START get_file_client_from_service_client]
+ :end-before: [END get_file_client_from_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Getting the file client to interact with a specific file.
+ """
+ try:
+ file_system_name = file_system.name
+ except AttributeError:
+ file_system_name = file_system
+ try:
+ file_path = file_path.name
+ except AttributeError:
+ pass
+
+ _pipeline = Pipeline(
+ transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+ policies=self._pipeline._impl_policies # pylint: disable = protected-access
+ )
+ return DataLakeFileClient(
+ self.url, file_system_name, file_path=file_path, credential=self._raw_credential,
+ api_version=self.api_version,
+ _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+ require_encryption=self.require_encryption,
+ key_encryption_key=self.key_encryption_key,
+ key_resolver_function=self.key_resolver_function)
+
+ def set_service_properties(self, **kwargs):
+ # type: (**Any) -> None
+ """Sets the properties of a storage account's Datalake service, including
+ Azure Storage Analytics.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2020-06-12'.
+
+ If an element (e.g. analytics_logging) is left as None, the
+ existing settings on the service for that functionality are preserved.
+
+ :keyword analytics_logging:
+ Groups the Azure Analytics Logging settings.
+ :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging
+ :keyword hour_metrics:
+ The hour metrics settings provide a summary of request
+ statistics grouped by API in hourly aggregates.
+ :type hour_metrics: ~azure.storage.filedatalake.Metrics
+ :keyword minute_metrics:
+ The minute metrics settings provide request statistics
+ for each minute.
+ :type minute_metrics: ~azure.storage.filedatalake.Metrics
+ :keyword cors:
+ You can include up to five CorsRule elements in the
+ list. If an empty list is specified, all CORS rules will be deleted,
+ and CORS will be disabled for the service.
+ :type cors: list[~azure.storage.filedatalake.CorsRule]
+ :keyword str target_version:
+ Indicates the default version to use for requests if an incoming
+ request's version is not specified.
+ :keyword delete_retention_policy:
+ The delete retention policy specifies whether to retain deleted files/directories.
+ It also specifies the number of days and versions of file/directory to keep.
+ :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy
+ :keyword static_website:
+ Specifies whether the static website feature is enabled,
+ and if yes, indicates the index document and 404 error document to use.
+ :type static_website: ~azure.storage.filedatalake.StaticWebsite
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ return self._blob_service_client.set_service_properties(**kwargs) # pylint: disable=protected-access
+
+ def get_service_properties(self, **kwargs):
+ # type: (**Any) -> Dict[str, Any]
+ """Gets the properties of a storage account's datalake service, including
+ Azure Storage Analytics.
+
+ ..
versionadded:: 12.4.0 + This operation was introduced in API version '2020-06-12'. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An object containing datalake service properties such as + analytics logging, hour/minute metrics, cors rules, etc. + :rtype: Dict[str, Any] + """ + props = self._blob_service_client.get_service_properties(**kwargs) # pylint: disable=protected-access + return get_datalake_service_properties(props) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_deserialize.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_deserialize.py new file mode 100644 index 0000000..a323995 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_deserialize.py @@ -0,0 +1,212 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import logging +from typing import ( # pylint: disable=unused-import + TYPE_CHECKING +) +from xml.etree.ElementTree import Element + +from azure.core.pipeline.policies import ContentDecodePolicy +from azure.core.exceptions import HttpResponseError, DecodeError, ResourceModifiedError, ClientAuthenticationError, \ + ResourceNotFoundError, ResourceExistsError +from ._models import FileProperties, DirectoryProperties, LeaseProperties, DeletedPathProperties, StaticWebsite, \ + RetentionPolicy, Metrics, AnalyticsLogging, PathProperties # pylint: disable=protected-access +from ._shared.models import StorageErrorCode + +if TYPE_CHECKING: + pass + +_LOGGER = logging.getLogger(__name__) + + +def deserialize_dir_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + dir_properties = DirectoryProperties( + metadata=metadata, + **headers + ) + return dir_properties + + +def deserialize_file_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + file_properties = FileProperties( + metadata=metadata, + **headers + ) + if 'Content-Range' in headers: + if 'x-ms-blob-content-md5' in headers: + file_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] + else: + file_properties.content_settings.content_md5 = None + return file_properties + + +def deserialize_path_properties(path_list): + return [PathProperties._from_generated(path) for path in path_list] # pylint: disable=protected-access + + +def get_deleted_path_properties_from_generated_code(generated): + deleted_path = DeletedPathProperties() + deleted_path.name = generated.name + deleted_path.deleted_time = generated.properties.deleted_time + deleted_path.remaining_retention_days = generated.properties.remaining_retention_days + deleted_path.deletion_id = generated.deletion_id + return deleted_path + + +def is_file_path(_, __, headers): + if headers['x-ms-resource-type'] == "file": + return True + return False + + +def get_datalake_service_properties(datalake_properties): + datalake_properties["analytics_logging"] = AnalyticsLogging._from_generated( # pylint: disable=protected-access + datalake_properties["analytics_logging"]) + datalake_properties["hour_metrics"] = Metrics._from_generated(datalake_properties["hour_metrics"]) # pylint: disable=protected-access + datalake_properties["minute_metrics"] = Metrics._from_generated( # pylint: disable=protected-access + 
datalake_properties["minute_metrics"]) + datalake_properties["delete_retention_policy"] = RetentionPolicy._from_generated( # pylint: disable=protected-access + datalake_properties["delete_retention_policy"]) + datalake_properties["static_website"] = StaticWebsite._from_generated( # pylint: disable=protected-access + datalake_properties["static_website"]) + return datalake_properties + + +def from_blob_properties(blob_properties): + file_props = FileProperties() + file_props.name = blob_properties.name + file_props.etag = blob_properties.etag + file_props.deleted = blob_properties.deleted + file_props.metadata = blob_properties.metadata + file_props.lease = blob_properties.lease + file_props.lease.__class__ = LeaseProperties + file_props.last_modified = blob_properties.last_modified + file_props.creation_time = blob_properties.creation_time + file_props.size = blob_properties.size + file_props.deleted_time = blob_properties.deleted_time + file_props.remaining_retention_days = blob_properties.remaining_retention_days + file_props.content_settings = blob_properties.content_settings + return file_props + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = value + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + try: + raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} + except AttributeError: + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def process_storage_error(storage_error): # pylint:disable=too-many-statements + raise_error = HttpResponseError + serialized = False + if not storage_error.response: + raise storage_error + # If it is one of those three then it has been serialized prior by the generated layer. + if isinstance(storage_error, (ResourceNotFoundError, ClientAuthenticationError, ResourceExistsError)): + serialized = True + error_code = storage_error.response.headers.get('x-ms-error-code') + error_message = storage_error.message + additional_data = {} + error_dict = {} + try: + error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) + # If it is an XML response + if isinstance(error_body, Element): + error_dict = { + child.tag.lower(): child.text + for child in error_body + } + # If it is a JSON response + elif isinstance(error_body, dict): + error_dict = error_body.get('error', {}) + elif not error_code: + _LOGGER.warning( + 'Unexpected return type % from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) + error_dict = {'message': str(error_body)} + + # If we extracted from a Json or XML response + if error_dict: + error_code = error_dict.get('code') + error_message = error_dict.get('message') + additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} + + except DecodeError: + pass + + try: + # This check would be unnecessary if we have already serialized the error. 
+ if error_code and not serialized: + error_code = StorageErrorCode(error_code) + if error_code in [StorageErrorCode.condition_not_met]: + raise_error = ResourceModifiedError + if error_code in [StorageErrorCode.invalid_authentication_info, + StorageErrorCode.authentication_failed]: + raise_error = ClientAuthenticationError + if error_code in [StorageErrorCode.resource_not_found, + StorageErrorCode.invalid_property_name, + StorageErrorCode.invalid_source_uri, + StorageErrorCode.source_path_not_found, + StorageErrorCode.lease_name_mismatch, + StorageErrorCode.file_system_not_found, + StorageErrorCode.path_not_found, + StorageErrorCode.parent_not_found, + StorageErrorCode.invalid_destination_path, + StorageErrorCode.invalid_rename_source_path, + StorageErrorCode.lease_is_already_broken, + StorageErrorCode.invalid_source_or_destination_resource_type, + StorageErrorCode.rename_destination_parent_path_not_found]: + raise_error = ResourceNotFoundError + if error_code in [StorageErrorCode.account_already_exists, + StorageErrorCode.account_being_created, + StorageErrorCode.resource_already_exists, + StorageErrorCode.resource_type_mismatch, + StorageErrorCode.source_path_is_being_deleted, + StorageErrorCode.path_already_exists, + StorageErrorCode.destination_path_is_being_deleted, + StorageErrorCode.file_system_already_exists, + StorageErrorCode.file_system_being_deleted, + StorageErrorCode.path_conflict]: + raise_error = ResourceExistsError + except ValueError: + # Got an unknown error code + pass + + # Error message should include all the error properties + try: + error_message += "\nErrorCode:{}".format(error_code.value) + except AttributeError: + error_message += "\nErrorCode:{}".format(error_code) + for name, info in additional_data.items(): + error_message += "\n{}:{}".format(name, info) + + # No need to create an instance if it has already been serialized by the generated layer + if serialized: + storage_error.message = error_message + error = storage_error + else: + error = raise_error(message=error_message, response=storage_error.response) + # Ensure these properties are stored in the error instance as well (not just the error message) + error.error_code = error_code + error.additional_info = additional_data + # error.args is what's surfaced on the traceback - show error message in all cases + error.args = (error.message,) + try: + # `from None` prevents us from double printing the exception (suppresses generated layer error context) + exec("raise error from None") # pylint: disable=exec-used # nosec + except SyntaxError: + raise error diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_download.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_download.py new file mode 100644 index 0000000..61716d3 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_download.py @@ -0,0 +1,59 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from typing import Iterator + +from ._deserialize import from_blob_properties + + +class StorageStreamDownloader(object): + """A streaming object to download from Azure Storage. + + :ivar str name: + The name of the file being downloaded. + :ivar ~azure.storage.filedatalake.FileProperties properties: + The properties of the file being downloaded. 
If only a range of the data is being
+ downloaded, this will be reflected in the properties.
+ :ivar int size:
+ The size of the total data in the stream. This will be the byte range if specified,
+ otherwise the total size of the file.
+ """
+
+ def __init__(self, downloader):
+ self._downloader = downloader
+ self.name = self._downloader.name
+ self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access
+ self.size = self._downloader.size
+
+ def __len__(self):
+ return self.size
+
+ def chunks(self):
+ # type: () -> Iterator[bytes]
+ """Iterate over chunks in the download stream.
+
+ :rtype: Iterator[bytes]
+ """
+ return self._downloader.chunks()
+
+ def readall(self):
+ """Download the contents of this file.
+
+ This operation is blocking until all data is downloaded.
+
+ :rtype: bytes or str
+ """
+ return self._downloader.readall()
+
+ def readinto(self, stream):
+ """Download the contents of this file to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream. The stream must be seekable if the download
+ uses more than one parallel connection.
+ :returns: The number of bytes read.
+ :rtype: int
+ """
+ return self._downloader.readinto(stream)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_file_system_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_file_system_client.py
new file mode 100644
index 0000000..3be2ac3
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_file_system_client.py
@@ -0,0 +1,922 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import functools
+from typing import Optional, Any, Union
+
+
+try:
+ from urllib.parse import urlparse, quote, unquote
+except ImportError:
+ from urlparse import urlparse # type: ignore
+ from urllib2 import quote, unquote # type: ignore
+import six
+
+from azure.core.pipeline import Pipeline
+from azure.core.exceptions import HttpResponseError
+from azure.core.paging import ItemPaged
+from azure.multiapi.storagev2.blob.v2020_06_12 import ContainerClient
+from ._shared.base_client import TransportWrapper, StorageAccountHostsMixin, parse_query, parse_connection_str
+from ._serialize import convert_dfs_url_to_blob_url, get_api_version
+from ._list_paths_helper import DeletedPathPropertiesPaged
+from ._models import LocationMode, FileSystemProperties, PublicAccess, DeletedPathProperties, FileProperties, \
+ DirectoryProperties
+from ._data_lake_file_client import DataLakeFileClient
+from ._data_lake_directory_client import DataLakeDirectoryClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._generated import AzureDataLakeStorageRESTAPI
+from ._generated.models import ListBlobsIncludeItem
+from ._deserialize import deserialize_path_properties, process_storage_error, is_file_path
+
+
+class FileSystemClient(StorageAccountHostsMixin):
+ """A client to interact with a specific file system, even if that file system
+ may not yet exist.
+
+ For operations relating to a specific directory or file within this file system, a directory client or file client
+ can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions.
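+
+ For example (a sketch; ``service_client`` is assumed to be an existing
+ ``DataLakeServiceClient``, and the names are illustrative)::
+
+ file_system_client = service_client.get_file_system_client("my-filesystem")
+ file_client = file_system_client.get_file_client("folder/data.csv")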
+ + :ivar str url: + The full endpoint URL to the file system, including SAS token if used. + :ivar str primary_endpoint: + The full primary endpoint URL. + :ivar str primary_hostname: + The hostname of the primary endpoint. + :param str account_url: + The URI to the storage account. + :param file_system_name: + The file system for the directory or files. + :type file_system_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START create_file_system_client_from_service] + :end-before: [END create_file_system_client_from_service] + :language: python + :dedent: 8 + :caption: Get a FileSystemClient from an existing DataLakeServiceClient. + """ + def __init__( + self, account_url, # type: str + file_system_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not file_system_name: + raise ValueError("Please specify a file system name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + blob_account_url = convert_dfs_url_to_blob_url(account_url) + # TODO: add self.account_url to base_client and remove _blob_account_url + self._blob_account_url = blob_account_url + + datalake_hosts = kwargs.pop('_hosts', None) + blob_hosts = None + if datalake_hosts: + blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY]) + blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""} + self._container_client = ContainerClient(blob_account_url, file_system_name, + credential=credential, _hosts=blob_hosts, **kwargs) + + _, sas_token = parse_query(parsed_url.query) + self.file_system_name = file_system_name + self._query_str, self._raw_credential = self._format_query_string(sas_token, credential) + + super(FileSystemClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential, + _hosts=datalake_hosts, **kwargs) + # ADLS doesn't support secondary endpoint, make sure it's empty + self._hosts[LocationMode.SECONDARY] = "" + self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, pipeline=self._pipeline) + api_version = get_api_version(kwargs) + self._client._config.version = api_version # pylint: disable=protected-access + self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url, + file_system=file_system_name, + pipeline=self._pipeline) + self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access + + def _format_url(self, hostname): + file_system_name = self.file_system_name + if isinstance(file_system_name, six.text_type): + file_system_name = file_system_name.encode('UTF-8') + return 
"{}://{}/{}{}".format( + self.scheme, + hostname, + quote(file_system_name), + self._query_str) + + def __exit__(self, *args): + self._container_client.close() + super(FileSystemClient, self).__exit__(*args) + + def close(self): + # type: () -> None + """ This method is to close the sockets opened by the client. + It need not be used when using with a context manager. + """ + self._container_client.close() + self.__exit__() + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + file_system_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): # type: (...) -> FileSystemClient + """ + Create FileSystemClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param file_system_name: The name of file system to interact with. + :type file_system_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account shared access + key, or an instance of a TokenCredentials class from azure.identity. + Credentials provided here will take precedence over those in the connection string. + :return a FileSystemClient + :rtype ~azure.storage.filedatalake.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START create_file_system_client_from_connection_string] + :end-before: [END create_file_system_client_from_connection_string] + :language: python + :dedent: 8 + :caption: Create FileSystemClient from connection string + """ + account_url, _, credential = parse_connection_str(conn_str, credential, 'dfs') + return cls( + account_url, file_system_name=file_system_name, credential=credential, **kwargs) + + def acquire_lease( + self, lease_duration=-1, # type: int + lease_id=None, # type: Optional[str] + **kwargs + ): + # type: (...) -> DataLakeLeaseClient + """ + Requests a new lease. If the file system does not have an active lease, + the DataLake service creates a lease on the file system and returns a new + lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. 
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A DataLakeLeaseClient object that can be used in a context manager.
+        :rtype: ~azure.storage.filedatalake.DataLakeLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START acquire_lease_on_file_system]
+                :end-before: [END acquire_lease_on_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Acquiring a lease on the file system.
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)
+        lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
+
+    def create_file_system(self, metadata=None,  # type: Optional[Dict[str, str]]
+                           public_access=None,  # type: Optional[PublicAccess]
+                           **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Creates a new file system under the specified account.
+
+        If a file system with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a property dict (Etag and last modified) for the
+        newly created file system.
+
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            file system as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :type public_access: ~azure.storage.filedatalake.PublicAccess
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A file system property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_file_system]
+                :end-before: [END create_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Creating a file system in the datalake service.
+        """
+        return self._container_client.create_container(metadata=metadata,
+                                                       public_access=public_access,
+                                                       **kwargs)
+
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if the file system exists, False otherwise.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: True if the file system exists, False otherwise.
+        :rtype: bool
+        """
+        return self._container_client.exists(**kwargs)
+
+    def _rename_file_system(self, new_name, **kwargs):
+        # type: (str, **Any) -> FileSystemClient
+        """Renames a filesystem.
+
+        The operation succeeds only if the source filesystem exists.
+
+        :param str new_name:
+            The new filesystem name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform the operation only if the lease ID given
+            matches the active lease ID of the source filesystem.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+ :rtype: ~azure.storage.filedatalake.FileSystemClient + """ + self._container_client._rename_container(new_name, **kwargs) # pylint: disable=protected-access + #TODO: self._raw_credential would not work with SAS tokens + renamed_file_system = FileSystemClient( + "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name, + credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, + _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + return renamed_file_system + + def delete_file_system(self, **kwargs): + # type: (Any) -> None + """Marks the specified file system for deletion. + + The file system and any files contained within it are later deleted during garbage collection. + If the file system is not found, a ResourceNotFoundError will be raised. + + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: + If specified, delete_file_system only succeeds if the + file system's lease is active and matches this ID. + Required if the file system has an active lease. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START delete_file_system] + :end-before: [END delete_file_system] + :language: python + :dedent: 12 + :caption: Deleting a file system in the datalake service. + """ + self._container_client.delete_container(**kwargs) + + def get_file_system_properties(self, **kwargs): + # type: (Any) -> FileSystemProperties + """Returns all user-defined metadata and system properties for the specified + file system. The data returned does not include the file system's list of paths. + + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: + If specified, get_file_system_properties only succeeds if the + file system's lease is active and matches this ID. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Properties for the specified file system within a file system object. + :rtype: ~azure.storage.filedatalake.FileSystemProperties + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START get_file_system_properties] + :end-before: [END get_file_system_properties] + :language: python + :dedent: 12 + :caption: Getting properties on the file system. + """ + container_properties = self._container_client.get_container_properties(**kwargs) + return FileSystemProperties._convert_from_container_props(container_properties) # pylint: disable=protected-access + + def set_file_system_metadata( # type: ignore + self, metadata, # type: Dict[str, str] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Sets one or more user-defined name-value pairs for the specified + file system. Each call to this operation replaces all existing metadata + attached to the file system. To remove all metadata from the file system, + call this operation with no metadata dict. + + :param metadata: + A dict containing name-value pairs to associate with the file system as + metadata. Example: {'category':'test'} + :type metadata: dict[str, str] + :keyword str or ~azure.storage.filedatalake.DataLakeLeaseClient lease: + If specified, set_file_system_metadata only succeeds if the + file system's lease is active and matches this ID. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: filesystem-updated property dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START set_file_system_metadata] + :end-before: [END set_file_system_metadata] + :language: python + :dedent: 12 + :caption: Setting metadata on the file system. + """ + return self._container_client.set_container_metadata(metadata=metadata, **kwargs) + + def set_file_system_access_policy( + self, signed_identifiers, # type: Dict[str, AccessPolicy] + public_access=None, # type: Optional[Union[str, PublicAccess]] + **kwargs + ): # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the permissions for the specified file system or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether files in a file system may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the file system. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. 
+        :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy]
+        :param ~azure.storage.filedatalake.PublicAccess public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :keyword lease:
+            Required if the file system has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File system-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+        """
+        return self._container_client.set_container_access_policy(signed_identifiers,
+                                                                  public_access=public_access, **kwargs)
+
+    def get_file_system_access_policy(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the permissions for the specified file system.
+        The permissions indicate whether file system data may be accessed publicly.
+
+        :keyword lease:
+            If specified, the operation only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
+        """
+        access_policy = self._container_client.get_container_access_policy(**kwargs)
+        return {
+            'public_access': PublicAccess._from_generated(access_policy['public_access']),  # pylint: disable=protected-access
+            'signed_identifiers': access_policy['signed_identifiers']
+        }
+
+    def get_paths(self, path=None,  # type: Optional[str]
+                  recursive=True,  # type: Optional[bool]
+                  max_results=None,  # type: Optional[int]
+                  **kwargs):
+        # type: (...) -> ItemPaged[PathProperties]
+        """Returns a generator to list the paths (which could be files or directories) under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str path:
+            Filters the results to return only paths under the specified path.
+        :param bool recursive:
+            Optional. Set True for recursive, False for iterative. The default is True.
+        :param int max_results: An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword upn:
+            Optional. Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names. If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :type upn: bool
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.PathProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_paths_in_file_system]
+                :end-before: [END get_paths_in_file_system]
+                :language: python
+                :dedent: 8
+                :caption: List the paths in the file system.
+        """
+        timeout = kwargs.pop('timeout', None)
+        return self._client.file_system.list_paths(
+            recursive=recursive,
+            max_results=max_results,
+            path=path,
+            timeout=timeout,
+            cls=deserialize_path_properties,
+            **kwargs)
+
+    def create_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                         metadata=None,  # type: Optional[Dict[str, str]]
+                         **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create a directory.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START create_directory_from_file_system]
+                :end-before: [END create_directory_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Create directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        directory_client.create_directory(metadata=metadata, **kwargs)
+        return directory_client
+
+    def delete_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                         **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Marks the specified directory for deletion.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_directory_from_file_system]
+                :end-before: [END delete_directory_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Delete directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        directory_client.delete_directory(**kwargs)
+        return directory_client
+
+    def create_file(self, file,  # type: Union[FileProperties, str]
+                    **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Create a file.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+ :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system.py + :start-after: [START create_file_from_file_system] + :end-before: [END create_file_from_file_system] + :language: python + :dedent: 8 + :caption: Create file in the file system. + """ + file_client = self.get_file_client(file) + file_client.create_file(**kwargs) + return file_client + + def delete_file(self, file, # type: Union[FileProperties, str] + **kwargs): + # type: (...) -> DataLakeFileClient + """ + Marks the specified file for deletion. + + :param file: + The file with which to interact. This can either be the name of the file, + or an instance of FileProperties. + :type file: str or ~azure.storage.filedatalake.FileProperties + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START delete_file_from_file_system]
+                :end-before: [END delete_file_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Delete file in the file system.
+        """
+        file_client = self.get_file_client(file)
+        file_client.delete_file(**kwargs)
+        return file_client
+
+    def _undelete_path_options(self, deleted_path_name, deletion_id):
+        quoted_path = quote(unquote(deleted_path_name.strip('/')))
+
+        url_and_token = self.url.replace('.dfs.', '.blob.').split('?')
+        try:
+            url = url_and_token[0] + '/' + quoted_path + '?' + url_and_token[1]
+        except IndexError:
+            url = url_and_token[0] + '/' + quoted_path
+
+        undelete_source = quoted_path + '?deletionid={}'.format(deletion_id) if deletion_id else None
+
+        return quoted_path, url, undelete_source
+
+    def _undelete_path(self, deleted_path_name, deletion_id, **kwargs):
+        # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient]
+        """Restores a soft-deleted path.
+
+        The operation is successful only if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :param str deleted_path_name:
+            Specifies the path (file or directory) to restore.
+        :param str deletion_id:
+            Specifies the version of the deleted path to restore.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient or ~azure.storage.filedatalake.DataLakeFileClient
+        """
+        _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id)
+
+        pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        path_client = AzureDataLakeStorageRESTAPI(
+            url, filesystem=self.file_system_name, path=deleted_path_name, pipeline=pipeline)
+        try:
+            is_file = path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs)
+            if is_file:
+                return self.get_file_client(deleted_path_name)
+            return self.get_directory_client(deleted_path_name)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_root_directory_client(self):
+        # type: () -> DataLakeDirectoryClient
+        """Get a client to interact with the root directory.
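+
+        The root directory has no name of its own and is addressed with the
+        path '/'. A minimal sketch of reading the filesystem-level ACL through
+        the root directory client (``file_system_client`` is an illustrative,
+        already-authenticated ``FileSystemClient``)::
+
+            root_dir = file_system_client._get_root_directory_client()
+            acl_props = root_dir.get_access_control()  # dict with 'owner', 'group', 'permissions', 'acl'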
+
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+        """
+        return self.get_directory_client('/')
+
+    def get_directory_client(self, directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_directory_client_from_file_system]
+                :end-before: [END get_directory_client_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            directory_name = directory.get('name')
+        except AttributeError:
+            directory_name = str(directory)
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       api_version=self.api_version,
+                                       _configuration=self._config, _pipeline=_pipeline,
+                                       _hosts=self._hosts,
+                                       require_encryption=self.require_encryption,
+                                       key_encryption_key=self.key_encryption_key,
+                                       key_resolver_function=self.key_resolver_function
+                                       )
+
+    def get_file_client(self, file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_path:
+            The file with which to interact. This can either be the path of the file (from the
+            root directory), e.g. directory/subdirectory/file, or an instance of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system.py
+                :start-after: [START get_file_client_from_file_system]
+                :end-before: [END get_file_client_from_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Getting the file client to interact with a specific file.
+        """
+        try:
+            file_path = file_path.get('name')
+        except AttributeError:
+            file_path = str(file_path)
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+            require_encryption=self.require_encryption,
+            key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
+
+    def list_deleted_paths(self, **kwargs):
+        # type: (Any) -> ItemPaged[DeletedPathProperties]
+        """Returns a generator to list the deleted (file or directory) paths under the specified file system.
+ The generator will lazily follow the continuation tokens returned by + the service. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2020-06-12'. + + :keyword str path_prefix: + Filters the results to return only paths under the specified path. + :keyword int results_per_page: + An optional value that specifies the maximum number of items to return per page. + If omitted or greater than 5,000, the response will include up to 5,000 items per page. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) response of DeletedPathProperties. + :rtype: + ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.DeletedPathProperties] + """ + path_prefix = kwargs.pop('path_prefix', None) + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment, + showonly=ListBlobsIncludeItem.deleted, + timeout=timeout, + **kwargs) + return ItemPaged( + command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged, + results_per_page=results_per_page, **kwargs) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/__init__.py new file mode 100644 index 0000000..5cd3ae2 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/__init__.py @@ -0,0 +1,16 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI +__all__ = ['AzureDataLakeStorageRESTAPI'] + +try: + from ._patch import patch_sdk # type: ignore + patch_sdk() +except ImportError: + pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_azure_data_lake_storage_restapi.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_azure_data_lake_storage_restapi.py new file mode 100644 index 0000000..fbd0a79 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_azure_data_lake_storage_restapi.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# --------------------------------------------------------------------------
+
+from typing import TYPE_CHECKING
+
+from azure.core import PipelineClient
+from msrest import Deserializer, Serializer
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any
+
+    from azure.core.pipeline.transport import HttpRequest, HttpResponse
+
+from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .operations import ServiceOperations
+from .operations import FileSystemOperations
+from .operations import PathOperations
+from . import models
+
+
+class AzureDataLakeStorageRESTAPI(object):
+    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.filedatalake.operations.ServiceOperations
+    :ivar file_system: FileSystemOperations operations
+    :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations
+    :ivar path: PathOperations operations
+    :vartype path: azure.storage.filedatalake.operations.PathOperations
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+    :type url: str
+    """
+
+    def __init__(
+        self,
+        url,  # type: str
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        base_url = '{url}'
+        self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs)
+        self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._serialize.client_side_validation = False
+        self._deserialize = Deserializer(client_models)
+
+        self.service = ServiceOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.file_system = FileSystemOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.path = PathOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    def _send_request(self, http_request, **kwargs):
+        # type: (HttpRequest, Any) -> HttpResponse
+        """Runs the network request through the client's chained policies.
+
+        :param http_request: The network request you want to make. Required.
+        :type http_request: ~azure.core.pipeline.transport.HttpRequest
+        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
+        :return: The response of your network call. Does not do error handling on your response.
+        :rtype: ~azure.core.pipeline.transport.HttpResponse
+        """
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
+        stream = kwargs.pop("stream", True)
+        pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
+        return pipeline_response.http_response
+
+    def close(self):
+        # type: () -> None
+        self._client.close()
+
+    def __enter__(self):
+        # type: () -> AzureDataLakeStorageRESTAPI
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *exc_details):
+        # type: (Any) -> None
+        self._client.__exit__(*exc_details)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_configuration.py
new file mode 100644
index 0000000..3bfff36
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/_configuration.py
@@ -0,0 +1,59 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import TYPE_CHECKING
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any
+
+VERSION = "unknown"
+
+class AzureDataLakeStorageRESTAPIConfiguration(Configuration):
+    """Configuration for AzureDataLakeStorageRESTAPI.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+    :type url: str
+    """
+
+    def __init__(
+        self,
+        url,  # type: str
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+        super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs)
+
+        self.url = url
+        self.resource = "filesystem"
+        self.version = "2020-06-12"
+        kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION))
+        self._configure(**kwargs)
+
+    def _configure(
+        self,
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
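+        # Note: each pipeline policy below can be overridden by passing the
+        # matching keyword argument; otherwise the azure-core default is used.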
+        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
+        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
+        self.authentication_policy = kwargs.get('authentication_policy')
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/__init__.py
new file mode 100644
index 0000000..24daed3
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/__init__.py
@@ -0,0 +1,10 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._azure_data_lake_storage_restapi import AzureDataLakeStorageRESTAPI
+__all__ = ['AzureDataLakeStorageRESTAPI']
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_azure_data_lake_storage_restapi.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_azure_data_lake_storage_restapi.py
new file mode 100644
index 0000000..efeeeb3
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_azure_data_lake_storage_restapi.py
@@ -0,0 +1,81 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any
+
+from azure.core import AsyncPipelineClient
+from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
+from msrest import Deserializer, Serializer
+
+from ._configuration import AzureDataLakeStorageRESTAPIConfiguration
+from .operations import ServiceOperations
+from .operations import FileSystemOperations
+from .operations import PathOperations
+from .. import models
+
+
+class AzureDataLakeStorageRESTAPI(object):
+    """Azure Data Lake Storage provides storage for Hadoop and other big data workloads.
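+
+    A minimal usage sketch (the account URL and filesystem name are
+    illustrative; the filesystem name rides in the URL path, and the calls
+    assume an async context)::
+
+        client = AzureDataLakeStorageRESTAPI("https://myaccount.dfs.core.windows.net/myfs")
+        await client.file_system.get_properties()
+        await client.close()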
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.filedatalake.aio.operations.ServiceOperations
+    :ivar file_system: FileSystemOperations operations
+    :vartype file_system: azure.storage.filedatalake.aio.operations.FileSystemOperations
+    :ivar path: PathOperations operations
+    :vartype path: azure.storage.filedatalake.aio.operations.PathOperations
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+    :type url: str
+    """
+
+    def __init__(
+        self,
+        url: str,
+        **kwargs: Any
+    ) -> None:
+        base_url = '{url}'
+        self._config = AzureDataLakeStorageRESTAPIConfiguration(url, **kwargs)
+        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._serialize.client_side_validation = False
+        self._deserialize = Deserializer(client_models)
+
+        self.service = ServiceOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.file_system = FileSystemOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.path = PathOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
+        """Runs the network request through the client's chained policies.
+
+        :param http_request: The network request you want to make. Required.
+        :type http_request: ~azure.core.pipeline.transport.HttpRequest
+        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
+        :return: The response of your network call. Does not do error handling on your response.
+        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
+        """
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
+        stream = kwargs.pop("stream", True)
+        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
+        return pipeline_response.http_response
+
+    async def close(self) -> None:
+        await self._client.close()
+
+    async def __aenter__(self) -> "AzureDataLakeStorageRESTAPI":
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details) -> None:
+        await self._client.__aexit__(*exc_details)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_configuration.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_configuration.py
new file mode 100644
index 0000000..8223472
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/_configuration.py
@@ -0,0 +1,53 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+VERSION = "unknown"
+
+class AzureDataLakeStorageRESTAPIConfiguration(Configuration):
+    """Configuration for AzureDataLakeStorageRESTAPI.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, container, or blob that is the target of the desired operation.
+    :type url: str
+    """
+
+    def __init__(
+        self,
+        url: str,
+        **kwargs: Any
+    ) -> None:
+        if url is None:
+            raise ValueError("Parameter 'url' must not be None.")
+        super(AzureDataLakeStorageRESTAPIConfiguration, self).__init__(**kwargs)
+
+        self.url = url
+        self.resource = "filesystem"
+        self.version = "2020-06-12"
+        kwargs.setdefault('sdk_moniker', 'azuredatalakestoragerestapi/{}'.format(VERSION))
+        self._configure(**kwargs)
+
+    def _configure(
+        self,
+        **kwargs: Any
+    ) -> None:
+        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
+        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
+        self.authentication_policy = kwargs.get('authentication_policy')
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/__init__.py
new file mode 100644
index 0000000..0db71e0
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/__init__.py
@@ -0,0 +1,17 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._service_operations import ServiceOperations
+from ._file_system_operations import FileSystemOperations
+from ._path_operations import PathOperations
+
+__all__ = [
+    'ServiceOperations',
+    'FileSystemOperations',
+    'PathOperations',
+]
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_file_system_operations.py
new file mode 100644
index 0000000..d4e206a
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_file_system_operations.py
@@ -0,0 +1,631 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class FileSystemOperations: + """FileSystemOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.filedatalake.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + properties: Optional[str] = None, + **kwargs + ) -> None: + """Create FileSystem. + + Create a FileSystem rooted at the specified location. If the FileSystem already exists, the + operation fails. This operation does not support conditional HTTP requests. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param properties: Optional. User-defined properties to be stored with the filesystem, in the + format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value + is a base64 encoded string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties not included in the list + will be removed. All properties are removed if the header is omitted. To merge new and + existing properties, first get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all properties. 
+ :type properties: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/json" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{filesystem}'} # type: ignore + + async def set_properties( + self, + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + properties: Optional[str] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Set FileSystem Properties. + + Set properties for the FileSystem. This operation supports conditional HTTP requests. For + more information, see `Specifying Conditional Headers for Blob Service Operations + `_. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param properties: Optional. User-defined properties to be stored with the filesystem, in the + format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value + is a base64 encoded string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties not included in the list + will be removed. All properties are removed if the header is omitted. To merge new and + existing properties, first get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all properties. + :type properties: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.set_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {'url': '/{filesystem}'} # type: ignore + + async def get_properties( + self, + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + **kwargs + ) -> None: + """Get FileSystem Properties. + + All system and user-defined filesystem properties are specified in the response headers. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/json" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + 
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) + response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{filesystem}'} # type: ignore + + async def delete( + self, + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Delete FileSystem. + + Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same + identifier cannot be created for at least 30 seconds. While the filesystem is being deleted, + attempts to create a filesystem with the same identifier will fail with status code 409 + (Conflict), with the service returning additional error information indicating that the + filesystem is being deleted. All other operations, including operations on any files or + directories within the filesystem, will fail with status code 404 (Not Found) while the + filesystem is being deleted. This operation supports conditional HTTP requests. For more + information, see `Specifying Conditional Headers for Blob Service Operations + `_. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param modified_access_conditions: Parameter group. 
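+     Groups the optional If-Modified-Since and If-Unmodified-Since conditional headers applied to
+     this request.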
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{filesystem}'} # type: ignore + + def list_paths( + self, + recursive: bool, + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + continuation: Optional[str] = None, + path: Optional[str] = None, + max_results: Optional[int] = None, + upn: Optional[bool] = None, + **kwargs + ) -> AsyncIterable["_models.PathList"]: + """List Paths. + + List FileSystem paths and their properties. + + :param recursive: Required. 
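+     If "true", all paths are listed; otherwise, only the paths at the root of the filesystem (or
+     of the directory specified in the request) are listed.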
+ :type recursive: bool
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param continuation: Optional. The number of paths returned with each invocation is limited. If
+ the number of paths to be returned exceeds this limit, a continuation token is returned in the
+ x-ms-continuation response header. When a continuation token is returned in the response, it
+ must be specified in a subsequent invocation of the list operation to continue listing the
+ paths.
+ :type continuation: str
+ :param path: Optional. Filters results to paths within the specified directory. An error
+ occurs if the directory does not exist.
+ :type path: str
+ :param max_results: An optional value that specifies the maximum number of items to return. If
+ omitted or greater than 5,000, the response will include up to 5,000 items.
+ :type max_results: int
+ :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+ "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+ headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If
+ "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+ false. Note that group and application Object IDs are not translated because they do not have
+ unique friendly names.
+ :type upn: bool
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator-like instance of either PathList or the result of cls(response)
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.PathList]
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.PathList"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ accept = "application/json"
+
+ # TODO: change this once continuation/next_link autorest PR is merged
+ def prepare_request(next_link=None, cont_token=None):
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter",
+ request_id_parameter, 'str')
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version,
+ 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ if not next_link:
+ # Construct URL
+ url = self.list_paths.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource,
+ 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+ # TODO: change this once continuation/next_link autorest PR is merged
+ if cont_token is not
None:
+ query_parameters['continuation'] = self._serialize.query("continuation", cont_token, 'str')
+ if path is not None:
+ query_parameters['directory'] = self._serialize.query("path", path, 'str')
+ query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
+ if max_results is not None:
+ query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+ if upn is not None:
+ query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ else:
+ url = next_link
+ query_parameters = {} # type: Dict[str, Any]
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ request = self._client.get(url, query_parameters, header_parameters)
+ return request
+
+ async def extract_data(pipeline_response):
+ # TODO: change this once continuation/next_link autorest PR is merged
+ try:
+ cont_token = pipeline_response.http_response.headers['x-ms-continuation']
+ except KeyError:
+ cont_token = None
+ deserialized = self._deserialize('PathList', pipeline_response)
+ list_of_elem = deserialized.paths
+ if cls:
+ list_of_elem = cls(list_of_elem)
+ return cont_token, AsyncList(list_of_elem)
+
+ # TODO: change this once continuation/next_link autorest PR is merged
+ async def get_next(cont_token=None):
+ # Fall back to the caller-supplied token only when no per-page token exists,
+ # so paging advances instead of re-requesting the first page forever.
+ cont_token = cont_token or continuation
+ request = prepare_request(cont_token=cont_token)
+
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, model=error)
+
+ return pipeline_response
+
+ return AsyncItemPaged(
+ get_next, extract_data
+ )
+ list_paths.metadata = {'url': '/{filesystem}'} # type: ignore
+
+ async def list_blob_hierarchy_segment(
+ self,
+ prefix: Optional[str] = None,
+ delimiter: Optional[str] = None,
+ marker: Optional[str] = None,
+ max_results: Optional[int] = None,
+ include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None,
+ showonly: Optional[str] = "deleted",
+ timeout: Optional[int] = None,
+ request_id_parameter: Optional[str] = None,
+ **kwargs
+ ) -> "_models.ListBlobsHierarchySegmentResponse":
+ """The List Blobs operation returns a list of the blobs under the specified container.
+
+ :param prefix: Filters results to blobs whose names begin with the specified prefix.
+ :type prefix: str
+ :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+ element in the response body that acts as a placeholder for all blobs whose names begin with
+ the same substring up to the appearance of the delimiter character. The delimiter may be a
+ single character or a string.
+ :type delimiter: str
+ :param marker: A string value that identifies the portion of the list of blobs to be
+ returned with the next listing operation. The operation returns the NextMarker value within the
+ response body if the listing operation did not return all blobs remaining to be listed
+ with the current page.
The NextMarker value can be used as the value for the marker parameter
+ in a subsequent call to request the next page of list items. The marker value is opaque to the
+ client.
+ :type marker: str
+ :param max_results: An optional value that specifies the maximum number of items to return. If
+ omitted or greater than 5,000, the response will include up to 5,000 items.
+ :type max_results: int
+ :param include: Include this parameter to specify one or more datasets to include in the
+ response.
+ :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem]
+ :param showonly: Optional. When this parameter is set to "deleted", only soft-deleted blobs are
+ included in the response.
+ :type showonly: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: ListBlobsHierarchySegmentResponse, or the result of cls(response)
+ :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ restype = "container"
+ comp = "list"
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if prefix is not None:
+ query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+ if delimiter is not None:
+ query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str')
+ if marker is not None:
+ query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+ if max_results is not None:
+ query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+ if include is not None:
+ query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',')
+ if showonly is not None:
+ query_parameters['showonly'] = self._serialize.query("showonly", showonly, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = 
await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_blob_hierarchy_segment.metadata = {'url': '/{filesystem}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_path_operations.py new file mode 100644 index 0000000..7cec589 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_path_operations.py @@ -0,0 +1,1773 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class PathOperations: + """PathOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.filedatalake.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
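+
+    A minimal usage sketch (assuming an already-constructed generated async client
+    that exposes this operation group as an attribute, here taken to be
+    ``client.path``)::
+
+        await client.path.create(resource="file")
+        downloaded = await client.path.read()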
+ """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + resource: Optional[Union[str, "_models.PathResourceType"]] = None, + continuation: Optional[str] = None, + mode: Optional[Union[str, "_models.PathRenameMode"]] = None, + rename_source: Optional[str] = None, + source_lease_id: Optional[str] = None, + properties: Optional[str] = None, + permissions: Optional[str] = None, + umask: Optional[str] = None, + path_http_headers: Optional["_models.PathHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Create File | Create Directory | Rename File | Rename Directory. + + Create or rename a file or directory. By default, the destination is overwritten and if the + destination already exists and has a lease the lease is broken. This operation supports + conditional HTTP requests. For more information, see `Specifying Conditional Headers for Blob + Service Operations `_. To fail if the destination already exists, + use a conditional request with If-None-Match: "*". + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param resource: Required only for Create File and Create Directory. The value must be "file" + or "directory". + :type resource: str or ~azure.storage.filedatalake.models.PathResourceType + :param continuation: Optional. When deleting a directory, the number of paths that are deleted + with each invocation is limited. If the number of paths to be deleted exceeds this limit, a + continuation token is returned in this response header. When a continuation token is returned + in the response, it must be specified in a subsequent invocation of the delete operation to + continue deleting the directory. + :type continuation: str + :param mode: Optional. Valid only when namespace is enabled. This parameter determines the + behavior of the rename operation. The value must be "legacy" or "posix", and the default value + will be "posix". + :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode + :param rename_source: An optional file or directory to be renamed. The value must have the + following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties + will overwrite the existing properties; otherwise, the existing properties will be preserved. + This value must be a URL percent-encoded string. Note that the string may only contain ASCII + characters in the ISO-8859-1 character set. + :type rename_source: str + :param source_lease_id: A lease ID for the source path. If specified, the source path must have + an active lease and the lease ID must match. + :type source_lease_id: str + :param properties: Optional. 
User-defined properties to be stored with the filesystem, in the + format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value + is a base64 encoded string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties not included in the list + will be removed. All properties are removed if the header is omitted. To merge new and + existing properties, first get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all properties. + :type properties: str + :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type permissions: str + :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, the umask + restricts the permissions of the file or directory to be created. The resulting permission is + given by p bitwise and not u, where p is the permission and u is the umask. For example, if p + is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 + for a directory and 0666 for a file. The default umask is 0027. The umask must be specified + in 4-digit octal notation (e.g. 0766). + :type umask: str + :param path_http_headers: Parameter group. + :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. 
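+     Groups the optional x-ms-source-if-match, x-ms-source-if-none-match,
+     x-ms-source-if-modified-since, and x-ms-source-if-unmodified-since conditional headers
+     applied to the rename source.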
+ :type source_modified_access_conditions: ~azure.storage.filedatalake.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cache_control = None + _content_encoding = None + _content_language = None + _content_disposition = None + _content_type = None + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_modified_since = None + _source_if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if path_http_headers is not None: + _cache_control = path_http_headers.cache_control + _content_encoding = path_http_headers.content_encoding + _content_language = path_http_headers.content_language + _content_disposition = path_http_headers.content_disposition + _content_type = path_http_headers.content_type + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if resource is not None: + query_parameters['resource'] = self._serialize.query("resource", resource, 'str') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if mode is not None: + query_parameters['mode'] = self._serialize.query("mode", mode, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = 
self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if rename_source is not None: + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-continuation']=self._deserialize('str', 
response.headers.get('x-ms-continuation')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def update( + self, + action: Union[str, "_models.PathUpdateAction"], + mode: Union[str, "_models.PathSetAccessControlRecursiveMode"], + body: IO, + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + max_records: Optional[int] = None, + continuation: Optional[str] = None, + force_flag: Optional[bool] = None, + position: Optional[int] = None, + retain_uncommitted_data: Optional[bool] = None, + close: Optional[bool] = None, + content_length: Optional[int] = None, + properties: Optional[str] = None, + owner: Optional[str] = None, + group: Optional[str] = None, + permissions: Optional[str] = None, + acl: Optional[str] = None, + path_http_headers: Optional["_models.PathHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> Optional["_models.SetAccessControlRecursiveResponse"]: + """Append Data | Flush Data | Set Properties | Set Access Control. + + Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, + sets properties for a file or directory, or sets access control for a file or directory. Data + can only be appended to a file. Concurrent writes to the same file using multiple clients are + not supported. This operation supports conditional HTTP requests. For more information, see + `Specifying Conditional Headers for Blob Service Operations `_. + + :param action: The action must be "append" to upload data to be appended to a file, "flush" to + flush previously uploaded data to a file, "setProperties" to set the properties of a file or + directory, "setAccessControl" to set the owner, group, permissions, or access control list for + a file or directory, or "setAccessControlRecursive" to set the access control list for a + directory recursively. Note that Hierarchical Namespace must be enabled for the account in + order to use access control. Also note that the Access Control List (ACL) includes permissions + for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers + are mutually exclusive. + :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction + :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" + modifies one or more POSIX access control rights that pre-exist on files and directories, + "remove" removes one or more POSIX access control rights that were present earlier on files + and directories. + :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode + :param body: Initial data. + :type body: IO + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the + maximum number of files or directories on which the acl change will be applied. 
If omitted or
+ greater than 2,000, the request will process up to 2,000 items.
+ :type max_records: int
+ :param continuation: Optional. The number of paths processed with each invocation is limited.
+ If the number of paths to be processed exceeds this limit, a continuation token is returned in
+ the response header x-ms-continuation. When a continuation token is returned in the response,
+ it must be percent-encoded and specified in a subsequent invocation of the
+ setAccessControlRecursive operation.
+ :type continuation: str
+ :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+ the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+ will ignore user errors and proceed with the operation on other sub-entities of the directory.
+ A continuation token will only be returned when forceFlag is true in case of user errors. If
+ not set, the default value is false.
+ :type force_flag: bool
+ :param position: This parameter allows the caller to upload data in parallel and control the
+ order in which it is appended to the file. It is required when uploading data to be appended
+ to the file and when flushing previously uploaded data to the file. The value must be the
+ position where the data is to be appended. Uploaded data is not immediately flushed, or
+ written, to the file. To flush, the previously uploaded data must be contiguous, the position
+ parameter must be specified and equal to the length of the file after all data has been
+ written, and there must not be a request entity body included with the request.
+ :type position: long
+ :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data
+ is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+ after the flush operation. The default is false. Data at offsets less than the specified
+ position are written to the file when flush succeeds, but this optional parameter allows data
+ after the flush position to be retained for a future flush operation.
+ :type retain_uncommitted_data: bool
+ :param close: Azure Storage Events allow applications to receive notifications when files
+ change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+ property indicating whether this is the final change to distinguish the difference between an
+ intermediate flush to a file stream and the final close of a file stream. The close query
+ parameter is valid only when the action is "flush" and change notifications are enabled. If the
+ value of close is "true" and the flush operation completes successfully, the service raises a
+ file change notification with a property indicating that this is the final update (the file
+ stream has been closed). If "false", a change notification is raised indicating the file has
+ changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+ indicate that the file stream has been closed.
+ :type close: bool
+ :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush
+ Data". Must be the length of the request content in bytes for "Append Data".
+ :type content_length: long
+ :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+ format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+ is a base64 encoded string.
Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties not included in the list + will be removed. All properties are removed if the header is omitted. To merge new and + existing properties, first get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all properties. + :type properties: str + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type permissions: str + :param acl: Sets POSIX access control rights on files and directories. The value is a comma- + separated list of access control entries. Each access control entry (ACE) consists of a scope, + a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param path_http_headers: Parameter group. + :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SetAccessControlRecursiveResponse, or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _content_md5 = None + _lease_id = None + _cache_control = None + _content_type = None + _content_disposition = None + _content_encoding = None + _content_language = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if path_http_headers is not None: + _content_md5 = path_http_headers.content_md5 + _cache_control = path_http_headers.cache_control + _content_type = path_http_headers.content_type + _content_disposition = path_http_headers.content_disposition + _content_encoding = path_http_headers.content_encoding + _content_language = path_http_headers.content_language + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/json" + + # Construct URL + url = self.update.metadata['url'] # type: ignore + path_format_arguments = { + 'url': 
self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + query_parameters['mode'] = self._serialize.query("mode", mode, 'str') + if force_flag is not None: + query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if _content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", 
_if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + deserialized = None + if response.status_code == 200: + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) + response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) + + if response.status_code == 202: + response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + update.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def lease( + self, + x_ms_lease_action: Union[str, 
"_models.PathLeaseAction"], + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + x_ms_lease_duration: Optional[int] = None, + x_ms_lease_break_period: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Lease Path. + + Create and manage a lease to restrict write and delete access to the path. This operation + supports conditional HTTP requests. For more information, see `Specifying Conditional Headers + for Blob Service Operations `_. + + :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew", + and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" + to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the + lease break period is allowed to elapse, during which time no lease operation except break and + release can be performed on the file. When a lease is successfully broken, the response + indicates the interval in seconds until a new lease can be acquired. Use "change" and specify + the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to + change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an + existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. + :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies + the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or + -1 for infinite lease. + :type x_ms_lease_duration: int + :param x_ms_lease_break_period: The lease break period duration is optional to break a lease, + and specifies the break period of the lease in seconds. The lease break duration must be + between 0 and 60 seconds. + :type x_ms_lease_break_period: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'str') + if x_ms_lease_duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", x_ms_lease_duration, 'int') + if x_ms_lease_break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + + if response.status_code == 201: + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + + if response.status_code == 202: + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time')) + + if cls: + return cls(pipeline_response, None, response_headers) + + lease.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def read( + self, + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + x_ms_range_get_content_md5: Optional[bool] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> IO: + """Read File. + + Read the contents of a file. For read operations, range requests are supported. This operation + supports conditional HTTP requests. For more information, see `Specifying Conditional Headers + for Blob Service Operations `_. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param range: The HTTP Range request header specifies one or more byte ranges of the resource + to be retrieved. + :type range: str + :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified + together with the Range header, the service returns the MD5 hash for the range, as long as the + range is less than or equal to 4MB in size. 
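# A hedged sketch of this read operation as surfaced by the public async client
# (download_file wraps PathOperations.read). The URL, filesystem, and path are
# placeholders, and the file is assumed to hold at least 4096 bytes so the
# range request stays in bounds.
import asyncio

from azure.multiapi.storagev2.filedatalake.v2020_06_12.aio import DataLakeFileClient

async def read_leading_range() -> bytes:
    file_client = DataLakeFileClient(
        "https://myaccount.dfs.core.windows.net", "myfs", "dir/data.txt",
        credential="<account-key>")
    async with file_client:
        # offset/length become the Range header described above, so the service
        # answers 206 (Partial Content) rather than 200.
        downloader = await file_client.download_file(offset=0, length=4096)
        return await downloader.readall()

print(len(asyncio.run(read_leading_range())))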
If this header is specified without the Range + header, the service returns status code 400 (Bad Request). If this header is set to true when + the range exceeds 4 MB in size, the service returns status code 400 (Bad Request). + :type x_ms_range_get_content_md5: bool + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.read.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['Range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if x_ms_range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) + response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) + response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + 
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) + response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + read.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def get_properties( + self, + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + action: Optional[Union[str, "_models.PathGetPropertiesAction"]] = None, + upn: Optional[bool] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Get Properties | Get Status | Get Access Control List. + + Get Properties returns all system and user defined properties for a path. Get Status returns + all system defined properties for a path. Get Access Control List returns the access control + list for a path. This operation supports conditional HTTP requests. For more information, see + `Specifying Conditional Headers for Blob Service Operations `_. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param action: Optional. If the value is "getStatus" only the system defined properties for the + path are returned. If the value is "getAccessControl" the access control list is returned in + the response headers (Hierarchical Namespace must be enabled for the account), otherwise the + properties are returned. + :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction + :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If + "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response + headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If + "false", the values will be returned as Azure Active Directory Object IDs. The default value is + false. Note that group and application Object IDs are not translated because they do not have + unique friendly names. + :type upn: bool + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
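# A sketch of the two flavors this docstring distinguishes: the default HEAD call
# for properties, and action="getAccessControl" surfaced as get_access_control().
# The client is constructed as in the earlier sketches; upn=True is only
# meaningful on Hierarchical Namespace accounts.
from azure.multiapi.storagev2.filedatalake.v2020_06_12.aio import DataLakeFileClient

async def inspect_path(file_client: DataLakeFileClient) -> None:
    props = await file_client.get_file_properties()
    print(props.name, props.size, props.last_modified)
    # Reads the x-ms-owner, x-ms-group, x-ms-permissions and x-ms-acl headers.
    access = await file_client.get_access_control(upn=True)
    print(access["owner"], access["group"], access["permissions"], access["acl"])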
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if action is not None: + query_parameters['action'] = self._serialize.query("action", action, 'str') + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + 
response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) + response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) + response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) + response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) + response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) + response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def delete( + self, + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + recursive: Optional[bool] = None, + continuation: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Delete File | Delete Directory. + + Delete the file or directory. This operation supports conditional HTTP requests. For more + information, see `Specifying Conditional Headers for Blob Service Operations + `_. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param recursive: Required. + :type recursive: bool + :param continuation: Optional. When deleting a directory, the number of paths that are deleted + with each invocation is limited. If the number of paths to be deleted exceeds this limit, a + continuation token is returned in this response header. 
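# A hedged sketch of deleting paths through the public clients, which wrap this
# operation. For very large directories the service may hand back the
# x-ms-continuation token described above; the convenience client is assumed to
# resubmit it, so callers normally never see it.
from azure.multiapi.storagev2.filedatalake.v2020_06_12.aio import FileSystemClient

async def remove_paths(fs_client: FileSystemClient) -> None:
    await fs_client.get_file_client("dir/data.txt").delete_file()
    # Directory deletes run with recursive=true at the REST layer.
    await fs_client.get_directory_client("dir").delete_directory()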
When a continuation token is returned + in the response, it must be specified in a subsequent invocation of the delete operation to + continue deleting the directory. + :type continuation: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['x-ms-deletion-id']=self._deserialize('str', response.headers.get('x-ms-deletion-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def set_access_control( + self, + timeout: Optional[int] = None, + owner: Optional[str] = None, + group: Optional[str] = None, + permissions: Optional[str] = None, + acl: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Set the owner, group, permissions, or access control list for a path. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type permissions: str + :param acl: Sets POSIX access control rights on files and directories. The value is a comma- + separated list of access control entries. Each access control entry (ACE) consists of a scope, + a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
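# A sketch of the public wrapper over this operation. permissions and acl express
# the same POSIX bits two ways (octal or symbolic notation vs. explicit ACEs) and
# the service accepts only one of them per call; the owner object id is a placeholder.
from azure.multiapi.storagev2.filedatalake.v2020_06_12.aio import DataLakeFileClient

async def set_owner_and_acl(file_client: DataLakeFileClient) -> None:
    await file_client.set_access_control(owner="<owner-object-id>", permissions="0750")
    await file_client.set_access_control(acl="user::rwx,group::r-x,other::---")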
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "setAccessControl" + accept = "application/json" + + # Construct URL + url = self.set_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def set_access_control_recursive( + self, + mode: Union[str, "_models.PathSetAccessControlRecursiveMode"], + timeout: Optional[int] = None, + continuation: Optional[str] = None, + force_flag: Optional[bool] = None, + max_records: Optional[int] = None, + acl: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> "_models.SetAccessControlRecursiveResponse": + """Set the access control list for a path and subpaths. + + :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" + modifies one or more POSIX access control rights that pre-exist on files and directories, + "remove" removes one or more POSIX access control rights that were present earlier on files + and directories. + :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param continuation: Optional. The number of paths processed with each invocation is limited. + If the number of paths to be processed exceeds this limit, a continuation token is returned in + the response header x-ms-continuation. When a continuation token is returned in the response, + it must be specified in a subsequent invocation of the operation to continue applying the + access control changes to the remaining paths. + :type continuation: str + :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, + the operation will terminate quickly on encountering user errors (4XX). If true, the operation + will ignore user errors and proceed with the operation on other sub-entities of the directory. + Continuation token will only be returned when forceFlag is true in case of user errors. If not + set, the default value is false. + :type force_flag: bool + :param max_records: Optional. It specifies the maximum number of files or directories on which + the acl change will be applied. If omitted or greater than 2,000, the request will process up + to 2,000 items. + :type max_records: int + :param acl: Sets POSIX access control rights on files and directories. The value is a comma- + separated list of access control entries. Each access control entry (ACE) consists of a scope, + a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled.
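# A sketch of the recursive variant via the public directory client. The result
# shape (per-kind counters plus a continuation token for resuming) and the
# continue_on_failure keyword, which maps to the forceFlag query parameter above,
# are assumptions based on the azure-storage-filedatalake 12.x surface.
from azure.multiapi.storagev2.filedatalake.v2020_06_12.aio import DataLakeDirectoryClient

async def apply_acl_recursively(directory_client: DataLakeDirectoryClient) -> None:
    result = await directory_client.set_access_control_recursive(
        acl="user::rwx,group::r-x,other::---",
        continue_on_failure=True,  # keep going past per-entry 4XX failures
    )
    print(result.counters.directories_successful,
          result.counters.files_successful,
          result.counters.failure_count)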
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SetAccessControlRecursiveResponse, or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + action = "setAccessControlRecursive" + accept = "application/json" + + # Construct URL + url = self.set_access_control_recursive.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + query_parameters['mode'] = self._serialize.query("mode", mode, 'str') + if force_flag is not None: + query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def flush_data( + self, + timeout: Optional[int] = None, + 
position: Optional[int] = None, + retain_uncommitted_data: Optional[bool] = None, + close: Optional[bool] = None, + content_length: Optional[int] = None, + request_id_parameter: Optional[str] = None, + path_http_headers: Optional["_models.PathHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + modified_access_conditions: Optional["_models.ModifiedAccessConditions"] = None, + **kwargs + ) -> None: + """Flush previously uploaded data to a file. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param position: This parameter allows the caller to upload data in parallel and control the + order in which it is appended to the file. It is required when uploading data to be appended + to the file and when flushing previously uploaded data to the file. The value must be the + position where the data is to be appended. Uploaded data is not immediately flushed, or + written, to the file. To flush, the previously uploaded data must be contiguous, the position + parameter must be specified and equal to the length of the file after all data has been + written, and there must not be a request entity body included with the request. + :type position: long + :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data + is retained after the flush operation completes; otherwise, the uncommitted data is deleted + after the flush operation. The default is false. Data at offsets less than the specified + position are written to the file when flush succeeds, but this optional parameter allows data + after the flush position to be retained for a future flush operation. + :type retain_uncommitted_data: bool + :param close: Azure Storage Events allow applications to receive notifications when files + change. When Azure Storage Events are enabled, a file changed event is raised. This event has a + property indicating whether this is the final change to distinguish the difference between an + intermediate flush to a file stream and the final close of a file stream. The close query + parameter is valid only when the action is "flush" and change notifications are enabled. If the + value of close is "true" and the flush operation completes successfully, the service raises a + file change notification with a property indicating that this is the final update (the file + stream has been closed). If "false", a change notification is raised indicating the file has + changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to + indicate that the file stream has been closed. + :type close: bool + :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush + Data". Must be the length of the request content in bytes for "Append Data". + :type content_length: long + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param path_http_headers: Parameter group. + :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group.
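# The append/flush contract from the docstring above, sketched end to end: data is
# staged with append_data and only becomes readable once flush_data is called with
# position equal to the total contiguous length (and an empty request body).
from azure.multiapi.storagev2.filedatalake.v2020_06_12.aio import DataLakeFileClient

async def write_and_commit(file_client: DataLakeFileClient) -> None:
    payload = b"hello, data lake"
    await file_client.create_file()
    await file_client.append_data(payload, offset=0, length=len(payload))
    # Until this flush, readers still see a zero-length file.
    await file_client.flush_data(len(payload))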
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _content_md5 = None + _lease_id = None + _cache_control = None + _content_type = None + _content_disposition = None + _content_encoding = None + _content_language = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if path_http_headers is not None: + _content_md5 = path_http_headers.content_md5 + _cache_control = path_http_headers.cache_control + _content_type = path_http_headers.content_type + _content_disposition = path_http_headers.content_disposition + _content_encoding = path_http_headers.content_encoding + _content_language = path_http_headers.content_language + action = "flush" + accept = "application/json" + + # Construct URL + url = self.flush_data.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if _content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", 
_content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + flush_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def append_data( + self, + body: IO, + position: Optional[int] = None, + timeout: Optional[int] = None, + content_length: Optional[int] = None, + transactional_content_crc64: Optional[bytearray] = None, + request_id_parameter: Optional[str] = None, + path_http_headers: Optional["_models.PathHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs + ) -> None: + """Append data to the file. + + :param body: Initial data. + :type body: IO + :param position: This parameter allows the caller to upload data in parallel and control the + order in which it is appended to the file. It is required when uploading data to be appended + to the file and when flushing previously uploaded data to the file. The value must be the + position where the data is to be appended. Uploaded data is not immediately flushed, or + written, to the file. 
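# Illustrating the position bookkeeping this parameter requires: each chunk lands
# at the running offset, and a single flush at the final offset commits the whole
# sequence. The chunks could equally be appended concurrently, as long as the
# offsets tile the file without gaps.
from azure.multiapi.storagev2.filedatalake.v2020_06_12.aio import DataLakeFileClient

async def staged_upload(file_client: DataLakeFileClient) -> None:
    chunks = [b"a" * 4096, b"b" * 4096, b"c" * 1024]
    await file_client.create_file()
    offset = 0
    for chunk in chunks:
        await file_client.append_data(chunk, offset=offset, length=len(chunk))
        offset += len(chunk)
    await file_client.flush_data(offset)  # offset == total bytes written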
To flush, the previously uploaded data must be contiguous, the position + parameter must be specified and equal to the length of the file after all data has been + written, and there must not be a request entity body included with the request. + :type position: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush + Data". Must be the length of the request content in bytes for "Append Data". + :type content_length: long + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param path_http_headers: Parameter group. + :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _transactional_content_hash = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if path_http_headers is not None: + _transactional_content_hash = path_http_headers.transactional_content_hash + action = "append" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.append_data.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if _transactional_content_hash is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", _transactional_content_hash, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = 
self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, response_headers) + + append_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def set_expiry( + self, + expiry_options: Union[str, "_models.PathExpiryOptions"], + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + expires_on: Optional[str] = None, + **kwargs + ) -> None: + """Sets the time a blob will expire and be deleted. + + :param expiry_options: Required. Indicates mode of the expiry time. + :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param expires_on: The time to set the blob to expiry. 
+ :type expires_on: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "expiry" + accept = "application/json" + + # Construct URL + url = self.set_expiry.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') + if expires_on is not None: + header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_expiry.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + async def undelete( + self, + timeout: Optional[int] = None, + undelete_source: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> None: + """Undelete a path that was previously soft deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of + the soft deleted blob to undelete. 
+ :type undelete_source: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "undelete" + accept = "application/json" + + # Construct URL + url = self.undelete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if undelete_source is not None: + header_parameters['x-ms-undelete-source'] = self._serialize.header("undelete_source", undelete_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + undelete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_service_operations.py new file mode 100644 index 0000000..f8ae878 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/aio/operations/_service_operations.py @@ -0,0 +1,148 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class ServiceOperations: + """ServiceOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.filedatalake.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list_file_systems( + self, + prefix: Optional[str] = None, + continuation: Optional[str] = None, + max_results: Optional[int] = None, + request_id_parameter: Optional[str] = None, + timeout: Optional[int] = None, + **kwargs + ) -> AsyncIterable["_models.FileSystemList"]: + """List FileSystems. + + List filesystems and their properties in the given account. + + :param prefix: Filters results to filesystems within the specified prefix. + :type prefix: str + :param continuation: Optional. The number of filesystems returned with each invocation is + limited. If the number of filesystems to be returned exceeds this limit, a continuation token + is returned in the response header x-ms-continuation. When a continuation token is returned + in the response, it must be specified in a subsequent invocation of the list operation to + continue listing the filesystems. + :type continuation: str + :param max_results: An optional value that specifies the maximum number of items to return. If + omitted or greater than 5,000, the response will include up to 5,000 items. + :type max_results: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either FileSystemList or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.models.FileSystemList] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + resource = "account" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list_file_systems.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("resource", resource, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if max_results is not None: + query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('FileSystemList', pipeline_response) + list_of_elem = deserialized.filesystems + if cls: + list_of_elem = cls(list_of_elem) + return None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list_file_systems.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/__init__.py new file mode 100644 index 0000000..fc4548f --- 
/dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/__init__.py @@ -0,0 +1,83 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AclFailedEntry + from ._models_py3 import BlobHierarchyListSegment + from ._models_py3 import BlobItemInternal + from ._models_py3 import BlobPrefix + from ._models_py3 import BlobPropertiesInternal + from ._models_py3 import FileSystem + from ._models_py3 import FileSystemList + from ._models_py3 import LeaseAccessConditions + from ._models_py3 import ListBlobsHierarchySegmentResponse + from ._models_py3 import ModifiedAccessConditions + from ._models_py3 import Path + from ._models_py3 import PathHTTPHeaders + from ._models_py3 import PathList + from ._models_py3 import SetAccessControlRecursiveResponse + from ._models_py3 import SourceModifiedAccessConditions + from ._models_py3 import StorageError + from ._models_py3 import StorageErrorError +except (SyntaxError, ImportError): + from ._models import AclFailedEntry # type: ignore + from ._models import BlobHierarchyListSegment # type: ignore + from ._models import BlobItemInternal # type: ignore + from ._models import BlobPrefix # type: ignore + from ._models import BlobPropertiesInternal # type: ignore + from ._models import FileSystem # type: ignore + from ._models import FileSystemList # type: ignore + from ._models import LeaseAccessConditions # type: ignore + from ._models import ListBlobsHierarchySegmentResponse # type: ignore + from ._models import ModifiedAccessConditions # type: ignore + from ._models import Path # type: ignore + from ._models import PathHTTPHeaders # type: ignore + from ._models import PathList # type: ignore + from ._models import SetAccessControlRecursiveResponse # type: ignore + from ._models import SourceModifiedAccessConditions # type: ignore + from ._models import StorageError # type: ignore + from ._models import StorageErrorError # type: ignore + +from ._azure_data_lake_storage_restapi_enums import ( + ListBlobsIncludeItem, + PathExpiryOptions, + PathGetPropertiesAction, + PathLeaseAction, + PathRenameMode, + PathResourceType, + PathSetAccessControlRecursiveMode, + PathUpdateAction, +) + +__all__ = [ + 'AclFailedEntry', + 'BlobHierarchyListSegment', + 'BlobItemInternal', + 'BlobPrefix', + 'BlobPropertiesInternal', + 'FileSystem', + 'FileSystemList', + 'LeaseAccessConditions', + 'ListBlobsHierarchySegmentResponse', + 'ModifiedAccessConditions', + 'Path', + 'PathHTTPHeaders', + 'PathList', + 'SetAccessControlRecursiveResponse', + 'SourceModifiedAccessConditions', + 'StorageError', + 'StorageErrorError', + 'ListBlobsIncludeItem', + 'PathExpiryOptions', + 'PathGetPropertiesAction', + 'PathLeaseAction', + 'PathRenameMode', + 'PathResourceType', + 'PathSetAccessControlRecursiveMode', + 'PathUpdateAction', +] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_azure_data_lake_storage_restapi_enums.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_azure_data_lake_storage_restapi_enums.py new file mode 100644 index 0000000..804050e --- /dev/null 
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_azure_data_lake_storage_restapi_enums.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum, EnumMeta +from six import with_metaclass + +class _CaseInsensitiveEnumMeta(EnumMeta): + def __getitem__(self, name): + return super().__getitem__(name.upper()) + + def __getattr__(cls, name): + """Return the enum member matching `name` + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. + """ + try: + return cls._member_map_[name.upper()] + except KeyError: + raise AttributeError(name) + + +class ListBlobsIncludeItem(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + COPY = "copy" + DELETED = "deleted" + METADATA = "metadata" + SNAPSHOTS = "snapshots" + UNCOMMITTEDBLOBS = "uncommittedblobs" + VERSIONS = "versions" + TAGS = "tags" + +class PathExpiryOptions(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + NEVER_EXPIRE = "NeverExpire" + RELATIVE_TO_CREATION = "RelativeToCreation" + RELATIVE_TO_NOW = "RelativeToNow" + ABSOLUTE = "Absolute" + +class PathGetPropertiesAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + GET_ACCESS_CONTROL = "getAccessControl" + GET_STATUS = "getStatus" + +class PathLeaseAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + ACQUIRE = "acquire" + BREAK_ENUM = "break" + CHANGE = "change" + RENEW = "renew" + RELEASE = "release" + +class PathRenameMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + LEGACY = "legacy" + POSIX = "posix" + +class PathResourceType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + DIRECTORY = "directory" + FILE = "file" + +class PathSetAccessControlRecursiveMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + SET = "set" + MODIFY = "modify" + REMOVE = "remove" + +class PathUpdateAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + APPEND = "append" + FLUSH = "flush" + SET_PROPERTIES = "setProperties" + SET_ACCESS_CONTROL = "setAccessControl" + SET_ACCESS_CONTROL_RECURSIVE = "setAccessControlRecursive" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models.py new file mode 100644 index 0000000..237617a --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models.py @@ -0,0 +1,672 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + + +class AclFailedEntry(msrest.serialization.Model): + """AclFailedEntry. + + :param name: + :type name: str + :param type: + :type type: str + :param error_message: + :type error_message: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(AclFailedEntry, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.type = kwargs.get('type', None) + self.error_message = kwargs.get('error_message', None) + + +class BlobHierarchyListSegment(msrest.serialization.Model): + """BlobHierarchyListSegment. + + All required parameters must be populated in order to send to Azure. + + :param blob_prefixes: + :type blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix] + :param blob_items: Required. + :type blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]'}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__( + self, + **kwargs + ): + super(BlobHierarchyListSegment, self).__init__(**kwargs) + self.blob_prefixes = kwargs.get('blob_prefixes', None) + self.blob_items = kwargs['blob_items'] + + +class BlobItemInternal(msrest.serialization.Model): + """An Azure Storage blob. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param deleted: Required. + :type deleted: bool + :param snapshot: Required. + :type snapshot: str + :param version_id: + :type version_id: str + :param is_current_version: + :type is_current_version: bool + :param properties: Required. Properties of a blob. + :type properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal + :param deletion_id: + :type deletion_id: str + """ + + _validation = { + 'name': {'required': True}, + 'deleted': {'required': True}, + 'snapshot': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'deleted': {'key': 'Deleted', 'type': 'bool'}, + 'snapshot': {'key': 'Snapshot', 'type': 'str'}, + 'version_id': {'key': 'VersionId', 'type': 'str'}, + 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, + 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, + 'deletion_id': {'key': 'DeletionId', 'type': 'str'}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__( + self, + **kwargs + ): + super(BlobItemInternal, self).__init__(**kwargs) + self.name = kwargs['name'] + self.deleted = kwargs['deleted'] + self.snapshot = kwargs['snapshot'] + self.version_id = kwargs.get('version_id', None) + self.is_current_version = kwargs.get('is_current_version', None) + self.properties = kwargs['properties'] + self.deletion_id = kwargs.get('deletion_id', None) + + +class BlobPrefix(msrest.serialization.Model): + """BlobPrefix. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. 
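+        The prefix stands in for the group of blobs that share it in a hierarchical listing and ends with the delimiter used for the listing.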
+ :type name: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(BlobPrefix, self).__init__(**kwargs) + self.name = kwargs['name'] + + +class BlobPropertiesInternal(msrest.serialization.Model): + """Properties of a blob. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: + :type creation_time: ~datetime.datetime + :param last_modified: Required. + :type last_modified: ~datetime.datetime + :param etag: Required. + :type etag: str + :param content_length: Size in bytes. + :type content_length: long + :param content_type: + :type content_type: str + :param content_encoding: + :type content_encoding: str + :param content_language: + :type content_language: str + :param content_md5: + :type content_md5: bytearray + :param content_disposition: + :type content_disposition: str + :param cache_control: + :type cache_control: str + :param blob_sequence_number: + :type blob_sequence_number: long + :param copy_id: + :type copy_id: str + :param copy_source: + :type copy_source: str + :param copy_progress: + :type copy_progress: str + :param copy_completion_time: + :type copy_completion_time: ~datetime.datetime + :param copy_status_description: + :type copy_status_description: str + :param server_encrypted: + :type server_encrypted: bool + :param incremental_copy: + :type incremental_copy: bool + :param destination_snapshot: + :type destination_snapshot: str + :param deleted_time: + :type deleted_time: ~datetime.datetime + :param remaining_retention_days: + :type remaining_retention_days: int + :param access_tier_inferred: + :type access_tier_inferred: bool + :param customer_provided_key_sha256: + :type customer_provided_key_sha256: str + :param encryption_scope: The name of the encryption scope under which the blob is encrypted. 
+ :type encryption_scope: str + :param access_tier_change_time: + :type access_tier_change_time: ~datetime.datetime + :param tag_count: + :type tag_count: int + :param expires_on: + :type expires_on: ~datetime.datetime + :param is_sealed: + :type is_sealed: bool + :param last_accessed_on: + :type last_accessed_on: ~datetime.datetime + :param delete_time: + :type delete_time: ~datetime.datetime + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + 'content_length': {'key': 'Content-Length', 'type': 'long'}, + 'content_type': {'key': 'Content-Type', 'type': 'str'}, + 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, + 'content_language': {'key': 'Content-Language', 'type': 'str'}, + 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, + 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, + 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, + 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, + 'copy_id': {'key': 'CopyId', 'type': 'str'}, + 'copy_source': {'key': 'CopySource', 'type': 'str'}, + 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, + 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, + 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, + 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, + 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, + 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, + 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, + 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, + 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, + 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, + 'tag_count': {'key': 'TagCount', 'type': 'int'}, + 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123'}, + 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, + 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, + 'delete_time': {'key': 'DeleteTime', 'type': 'rfc-1123'}, + } + _xml_map = { + 'name': 'Properties' + } + + def __init__( + self, + **kwargs + ): + super(BlobPropertiesInternal, self).__init__(**kwargs) + self.creation_time = kwargs.get('creation_time', None) + self.last_modified = kwargs['last_modified'] + self.etag = kwargs['etag'] + self.content_length = kwargs.get('content_length', None) + self.content_type = kwargs.get('content_type', None) + self.content_encoding = kwargs.get('content_encoding', None) + self.content_language = kwargs.get('content_language', None) + self.content_md5 = kwargs.get('content_md5', None) + self.content_disposition = kwargs.get('content_disposition', None) + self.cache_control = kwargs.get('cache_control', None) + self.blob_sequence_number = kwargs.get('blob_sequence_number', None) + self.copy_id = kwargs.get('copy_id', None) + self.copy_source = kwargs.get('copy_source', None) + self.copy_progress = kwargs.get('copy_progress', None) + self.copy_completion_time = kwargs.get('copy_completion_time', None) + self.copy_status_description = kwargs.get('copy_status_description', None) + 
self.server_encrypted = kwargs.get('server_encrypted', None) + self.incremental_copy = kwargs.get('incremental_copy', None) + self.destination_snapshot = kwargs.get('destination_snapshot', None) + self.deleted_time = kwargs.get('deleted_time', None) + self.remaining_retention_days = kwargs.get('remaining_retention_days', None) + self.access_tier_inferred = kwargs.get('access_tier_inferred', None) + self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) + self.encryption_scope = kwargs.get('encryption_scope', None) + self.access_tier_change_time = kwargs.get('access_tier_change_time', None) + self.tag_count = kwargs.get('tag_count', None) + self.expires_on = kwargs.get('expires_on', None) + self.is_sealed = kwargs.get('is_sealed', None) + self.last_accessed_on = kwargs.get('last_accessed_on', None) + self.delete_time = kwargs.get('delete_time', None) + + +class FileSystem(msrest.serialization.Model): + """FileSystem. + + :param name: + :type name: str + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(FileSystem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.last_modified = kwargs.get('last_modified', None) + self.e_tag = kwargs.get('e_tag', None) + + +class FileSystemList(msrest.serialization.Model): + """FileSystemList. + + :param filesystems: + :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] + """ + + _attribute_map = { + 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, + } + + def __init__( + self, + **kwargs + ): + super(FileSystemList, self).__init__(**kwargs) + self.filesystems = kwargs.get('filesystems', None) + + +class LeaseAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param lease_id: If specified, the operation only succeeds if the resource's lease is active + and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': 'leaseId', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = kwargs.get('lease_id', None) + + +class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param delimiter: + :type delimiter: str + :param segment: Required. 
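+        The listing segment that carries the blob items and blob prefixes for this page of results.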
+ :type segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'delimiter': {'key': 'Delimiter', 'type': 'str'}, + 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs['service_endpoint'] + self.container_name = kwargs['container_name'] + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.delimiter = kwargs.get('delimiter', None) + self.segment = kwargs['segment'] + self.next_marker = kwargs.get('next_marker', None) + + +class ModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param if_modified_since: Specify this header value to operate only on a blob if it has been + modified since the specified date/time. + :type if_modified_since: ~datetime.datetime + :param if_unmodified_since: Specify this header value to operate only on a blob if it has not + been modified since the specified date/time. + :type if_unmodified_since: ~datetime.datetime + :param if_match: Specify an ETag value to operate only on blobs with a matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. + :type if_none_match: str + """ + + _attribute_map = { + 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, + 'if_match': {'key': 'ifMatch', 'type': 'str'}, + 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = kwargs.get('if_modified_since', None) + self.if_unmodified_since = kwargs.get('if_unmodified_since', None) + self.if_match = kwargs.get('if_match', None) + self.if_none_match = kwargs.get('if_none_match', None) + + +class Path(msrest.serialization.Model): + """Path. 
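+    A single entry (a file or a directory) returned when enumerating the paths of a filesystem.
+
+    For illustration only, a minimal construction sketch with made-up values::
+
+        path = Path(name="dir/file.txt", is_directory=False, content_length=42)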
+ + :param name: + :type name: str + :param is_directory: + :type is_directory: bool + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + :param content_length: + :type content_length: long + :param owner: + :type owner: str + :param group: + :type group: str + :param permissions: + :type permissions: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'owner': {'key': 'owner', 'type': 'str'}, + 'group': {'key': 'group', 'type': 'str'}, + 'permissions': {'key': 'permissions', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(Path, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.is_directory = kwargs.get('is_directory', False) + self.last_modified = kwargs.get('last_modified', None) + self.e_tag = kwargs.get('e_tag', None) + self.content_length = kwargs.get('content_length', None) + self.owner = kwargs.get('owner', None) + self.group = kwargs.get('group', None) + self.permissions = kwargs.get('permissions', None) + + +class PathHTTPHeaders(msrest.serialization.Model): + """Parameter group. + + :param cache_control: Optional. Sets the blob's cache control. If specified, this property is + stored with the blob and returned with a read request. + :type cache_control: str + :param content_encoding: Optional. Sets the blob's content encoding. If specified, this + property is stored with the blob and returned with a read request. + :type content_encoding: str + :param content_language: Optional. Set the blob's content language. If specified, this property + is stored with the blob and returned with a read request. + :type content_language: str + :param content_disposition: Optional. Sets the blob's Content-Disposition header. + :type content_disposition: str + :param content_type: Optional. Sets the blob's content type. If specified, this property is + stored with the blob and returned with a read request. + :type content_type: str + :param content_md5: Specify the transactional md5 for the body, to be validated by the service. + :type content_md5: bytearray + :param transactional_content_hash: Specify the transactional md5 for the body, to be validated + by the service. 
+ :type transactional_content_hash: bytearray + """ + + _attribute_map = { + 'cache_control': {'key': 'cacheControl', 'type': 'str'}, + 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, + 'content_language': {'key': 'contentLanguage', 'type': 'str'}, + 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'}, + 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'}, + } + + def __init__( + self, + **kwargs + ): + super(PathHTTPHeaders, self).__init__(**kwargs) + self.cache_control = kwargs.get('cache_control', None) + self.content_encoding = kwargs.get('content_encoding', None) + self.content_language = kwargs.get('content_language', None) + self.content_disposition = kwargs.get('content_disposition', None) + self.content_type = kwargs.get('content_type', None) + self.content_md5 = kwargs.get('content_md5', None) + self.transactional_content_hash = kwargs.get('transactional_content_hash', None) + + +class PathList(msrest.serialization.Model): + """PathList. + + :param paths: + :type paths: list[~azure.storage.filedatalake.models.Path] + """ + + _attribute_map = { + 'paths': {'key': 'paths', 'type': '[Path]'}, + } + + def __init__( + self, + **kwargs + ): + super(PathList, self).__init__(**kwargs) + self.paths = kwargs.get('paths', None) + + +class SetAccessControlRecursiveResponse(msrest.serialization.Model): + """SetAccessControlRecursiveResponse. + + :param directories_successful: + :type directories_successful: int + :param files_successful: + :type files_successful: int + :param failure_count: + :type failure_count: int + :param failed_entries: + :type failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] + """ + + _attribute_map = { + 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, + 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, + 'failure_count': {'key': 'failureCount', 'type': 'int'}, + 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, + } + + def __init__( + self, + **kwargs + ): + super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) + self.directories_successful = kwargs.get('directories_successful', None) + self.files_successful = kwargs.get('files_successful', None) + self.failure_count = kwargs.get('failure_count', None) + self.failed_entries = kwargs.get('failed_entries', None) + + +class SourceModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. + :type source_if_match: str + :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching + value. + :type source_if_none_match: str + :param source_if_modified_since: Specify this header value to operate only on a blob if it has + been modified since the specified date/time. + :type source_if_modified_since: ~datetime.datetime + :param source_if_unmodified_since: Specify this header value to operate only on a blob if it + has not been modified since the specified date/time. 
+ :type source_if_unmodified_since: ~datetime.datetime + """ + + _attribute_map = { + 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, + 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, + 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, + 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, + } + + def __init__( + self, + **kwargs + ): + super(SourceModifiedAccessConditions, self).__init__(**kwargs) + self.source_if_match = kwargs.get('source_if_match', None) + self.source_if_none_match = kwargs.get('source_if_none_match', None) + self.source_if_modified_since = kwargs.get('source_if_modified_since', None) + self.source_if_unmodified_since = kwargs.get('source_if_unmodified_since', None) + + +class StorageError(msrest.serialization.Model): + """StorageError. + + :param error: The service error response object. + :type error: ~azure.storage.filedatalake.models.StorageErrorError + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'StorageErrorError'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageError, self).__init__(**kwargs) + self.error = kwargs.get('error', None) + + +class StorageErrorError(msrest.serialization.Model): + """The service error response object. + + :param code: The service error code. + :type code: str + :param message: The service error message. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'Code', 'type': 'str'}, + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageErrorError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models_py3.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models_py3.py new file mode 100644 index 0000000..bbe361c --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/models/_models_py3.py @@ -0,0 +1,779 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import List, Optional + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + + +class AclFailedEntry(msrest.serialization.Model): + """AclFailedEntry. + + :param name: + :type name: str + :param type: + :type type: str + :param error_message: + :type error_message: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + type: Optional[str] = None, + error_message: Optional[str] = None, + **kwargs + ): + super(AclFailedEntry, self).__init__(**kwargs) + self.name = name + self.type = type + self.error_message = error_message + + +class BlobHierarchyListSegment(msrest.serialization.Model): + """BlobHierarchyListSegment. + + All required parameters must be populated in order to send to Azure. 
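+    Groups the blob prefixes and blob items that make up one segment of a hierarchical listing.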
+ + :param blob_prefixes: + :type blob_prefixes: list[~azure.storage.filedatalake.models.BlobPrefix] + :param blob_items: Required. + :type blob_items: list[~azure.storage.filedatalake.models.BlobItemInternal] + """ + + _validation = { + 'blob_items': {'required': True}, + } + + _attribute_map = { + 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]'}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]'}, + } + _xml_map = { + 'name': 'Blobs' + } + + def __init__( + self, + *, + blob_items: List["BlobItemInternal"], + blob_prefixes: Optional[List["BlobPrefix"]] = None, + **kwargs + ): + super(BlobHierarchyListSegment, self).__init__(**kwargs) + self.blob_prefixes = blob_prefixes + self.blob_items = blob_items + + +class BlobItemInternal(msrest.serialization.Model): + """An Azure Storage blob. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param deleted: Required. + :type deleted: bool + :param snapshot: Required. + :type snapshot: str + :param version_id: + :type version_id: str + :param is_current_version: + :type is_current_version: bool + :param properties: Required. Properties of a blob. + :type properties: ~azure.storage.filedatalake.models.BlobPropertiesInternal + :param deletion_id: + :type deletion_id: str + """ + + _validation = { + 'name': {'required': True}, + 'deleted': {'required': True}, + 'snapshot': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'deleted': {'key': 'Deleted', 'type': 'bool'}, + 'snapshot': {'key': 'Snapshot', 'type': 'str'}, + 'version_id': {'key': 'VersionId', 'type': 'str'}, + 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool'}, + 'properties': {'key': 'Properties', 'type': 'BlobPropertiesInternal'}, + 'deletion_id': {'key': 'DeletionId', 'type': 'str'}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__( + self, + *, + name: str, + deleted: bool, + snapshot: str, + properties: "BlobPropertiesInternal", + version_id: Optional[str] = None, + is_current_version: Optional[bool] = None, + deletion_id: Optional[str] = None, + **kwargs + ): + super(BlobItemInternal, self).__init__(**kwargs) + self.name = name + self.deleted = deleted + self.snapshot = snapshot + self.version_id = version_id + self.is_current_version = is_current_version + self.properties = properties + self.deletion_id = deletion_id + + +class BlobPrefix(msrest.serialization.Model): + """BlobPrefix. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(BlobPrefix, self).__init__(**kwargs) + self.name = name + + +class BlobPropertiesInternal(msrest.serialization.Model): + """Properties of a blob. + + All required parameters must be populated in order to send to Azure. + + :param creation_time: + :type creation_time: ~datetime.datetime + :param last_modified: Required. + :type last_modified: ~datetime.datetime + :param etag: Required. + :type etag: str + :param content_length: Size in bytes. 
+ :type content_length: long + :param content_type: + :type content_type: str + :param content_encoding: + :type content_encoding: str + :param content_language: + :type content_language: str + :param content_md5: + :type content_md5: bytearray + :param content_disposition: + :type content_disposition: str + :param cache_control: + :type cache_control: str + :param blob_sequence_number: + :type blob_sequence_number: long + :param copy_id: + :type copy_id: str + :param copy_source: + :type copy_source: str + :param copy_progress: + :type copy_progress: str + :param copy_completion_time: + :type copy_completion_time: ~datetime.datetime + :param copy_status_description: + :type copy_status_description: str + :param server_encrypted: + :type server_encrypted: bool + :param incremental_copy: + :type incremental_copy: bool + :param destination_snapshot: + :type destination_snapshot: str + :param deleted_time: + :type deleted_time: ~datetime.datetime + :param remaining_retention_days: + :type remaining_retention_days: int + :param access_tier_inferred: + :type access_tier_inferred: bool + :param customer_provided_key_sha256: + :type customer_provided_key_sha256: str + :param encryption_scope: The name of the encryption scope under which the blob is encrypted. + :type encryption_scope: str + :param access_tier_change_time: + :type access_tier_change_time: ~datetime.datetime + :param tag_count: + :type tag_count: int + :param expires_on: + :type expires_on: ~datetime.datetime + :param is_sealed: + :type is_sealed: bool + :param last_accessed_on: + :type last_accessed_on: ~datetime.datetime + :param delete_time: + :type delete_time: ~datetime.datetime + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + } + + _attribute_map = { + 'creation_time': {'key': 'Creation-Time', 'type': 'rfc-1123'}, + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + 'content_length': {'key': 'Content-Length', 'type': 'long'}, + 'content_type': {'key': 'Content-Type', 'type': 'str'}, + 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, + 'content_language': {'key': 'Content-Language', 'type': 'str'}, + 'content_md5': {'key': 'Content-MD5', 'type': 'bytearray'}, + 'content_disposition': {'key': 'Content-Disposition', 'type': 'str'}, + 'cache_control': {'key': 'Cache-Control', 'type': 'str'}, + 'blob_sequence_number': {'key': 'x-ms-blob-sequence-number', 'type': 'long'}, + 'copy_id': {'key': 'CopyId', 'type': 'str'}, + 'copy_source': {'key': 'CopySource', 'type': 'str'}, + 'copy_progress': {'key': 'CopyProgress', 'type': 'str'}, + 'copy_completion_time': {'key': 'CopyCompletionTime', 'type': 'rfc-1123'}, + 'copy_status_description': {'key': 'CopyStatusDescription', 'type': 'str'}, + 'server_encrypted': {'key': 'ServerEncrypted', 'type': 'bool'}, + 'incremental_copy': {'key': 'IncrementalCopy', 'type': 'bool'}, + 'destination_snapshot': {'key': 'DestinationSnapshot', 'type': 'str'}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, + 'access_tier_inferred': {'key': 'AccessTierInferred', 'type': 'bool'}, + 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str'}, + 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str'}, + 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, + 'tag_count': {'key': 'TagCount', 'type': 'int'}, + 'expires_on': {'key': 'Expiry-Time', 
'type': 'rfc-1123'}, + 'is_sealed': {'key': 'Sealed', 'type': 'bool'}, + 'last_accessed_on': {'key': 'LastAccessTime', 'type': 'rfc-1123'}, + 'delete_time': {'key': 'DeleteTime', 'type': 'rfc-1123'}, + } + _xml_map = { + 'name': 'Properties' + } + + def __init__( + self, + *, + last_modified: datetime.datetime, + etag: str, + creation_time: Optional[datetime.datetime] = None, + content_length: Optional[int] = None, + content_type: Optional[str] = None, + content_encoding: Optional[str] = None, + content_language: Optional[str] = None, + content_md5: Optional[bytearray] = None, + content_disposition: Optional[str] = None, + cache_control: Optional[str] = None, + blob_sequence_number: Optional[int] = None, + copy_id: Optional[str] = None, + copy_source: Optional[str] = None, + copy_progress: Optional[str] = None, + copy_completion_time: Optional[datetime.datetime] = None, + copy_status_description: Optional[str] = None, + server_encrypted: Optional[bool] = None, + incremental_copy: Optional[bool] = None, + destination_snapshot: Optional[str] = None, + deleted_time: Optional[datetime.datetime] = None, + remaining_retention_days: Optional[int] = None, + access_tier_inferred: Optional[bool] = None, + customer_provided_key_sha256: Optional[str] = None, + encryption_scope: Optional[str] = None, + access_tier_change_time: Optional[datetime.datetime] = None, + tag_count: Optional[int] = None, + expires_on: Optional[datetime.datetime] = None, + is_sealed: Optional[bool] = None, + last_accessed_on: Optional[datetime.datetime] = None, + delete_time: Optional[datetime.datetime] = None, + **kwargs + ): + super(BlobPropertiesInternal, self).__init__(**kwargs) + self.creation_time = creation_time + self.last_modified = last_modified + self.etag = etag + self.content_length = content_length + self.content_type = content_type + self.content_encoding = content_encoding + self.content_language = content_language + self.content_md5 = content_md5 + self.content_disposition = content_disposition + self.cache_control = cache_control + self.blob_sequence_number = blob_sequence_number + self.copy_id = copy_id + self.copy_source = copy_source + self.copy_progress = copy_progress + self.copy_completion_time = copy_completion_time + self.copy_status_description = copy_status_description + self.server_encrypted = server_encrypted + self.incremental_copy = incremental_copy + self.destination_snapshot = destination_snapshot + self.deleted_time = deleted_time + self.remaining_retention_days = remaining_retention_days + self.access_tier_inferred = access_tier_inferred + self.customer_provided_key_sha256 = customer_provided_key_sha256 + self.encryption_scope = encryption_scope + self.access_tier_change_time = access_tier_change_time + self.tag_count = tag_count + self.expires_on = expires_on + self.is_sealed = is_sealed + self.last_accessed_on = last_accessed_on + self.delete_time = delete_time + + +class FileSystem(msrest.serialization.Model): + """FileSystem. 
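+    A filesystem entry as returned by the list filesystems operation.
+
+    For illustration only, a construction sketch with made-up values::
+
+        fs = FileSystem(name="myfilesystem", last_modified="Mon, 01 Feb 2021 08:00:00 GMT", e_tag="0x8D8B90F53D5E2A0")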
+ + :param name: + :type name: str + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + last_modified: Optional[str] = None, + e_tag: Optional[str] = None, + **kwargs + ): + super(FileSystem, self).__init__(**kwargs) + self.name = name + self.last_modified = last_modified + self.e_tag = e_tag + + +class FileSystemList(msrest.serialization.Model): + """FileSystemList. + + :param filesystems: + :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] + """ + + _attribute_map = { + 'filesystems': {'key': 'filesystems', 'type': '[FileSystem]'}, + } + + def __init__( + self, + *, + filesystems: Optional[List["FileSystem"]] = None, + **kwargs + ): + super(FileSystemList, self).__init__(**kwargs) + self.filesystems = filesystems + + +class LeaseAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param lease_id: If specified, the operation only succeeds if the resource's lease is active + and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': 'leaseId', 'type': 'str'}, + } + + def __init__( + self, + *, + lease_id: Optional[str] = None, + **kwargs + ): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = lease_id + + +class ListBlobsHierarchySegmentResponse(msrest.serialization.Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param container_name: Required. + :type container_name: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param delimiter: + :type delimiter: str + :param segment: Required. + :type segment: ~azure.storage.filedatalake.models.BlobHierarchyListSegment + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'container_name': {'required': True}, + 'segment': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'delimiter': {'key': 'Delimiter', 'type': 'str'}, + 'segment': {'key': 'Segment', 'type': 'BlobHierarchyListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + service_endpoint: str, + container_name: str, + segment: "BlobHierarchyListSegment", + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + delimiter: Optional[str] = None, + next_marker: Optional[str] = None, + **kwargs + ): + super(ListBlobsHierarchySegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.container_name = container_name + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.delimiter = delimiter + self.segment = segment + self.next_marker = next_marker + + +class ModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. 
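+    Bundles the conditional request headers (If-Modified-Since, If-Unmodified-Since, If-Match, If-None-Match) applied to an operation.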
+ + :param if_modified_since: Specify this header value to operate only on a blob if it has been + modified since the specified date/time. + :type if_modified_since: ~datetime.datetime + :param if_unmodified_since: Specify this header value to operate only on a blob if it has not + been modified since the specified date/time. + :type if_unmodified_since: ~datetime.datetime + :param if_match: Specify an ETag value to operate only on blobs with a matching value. + :type if_match: str + :param if_none_match: Specify an ETag value to operate only on blobs without a matching value. + :type if_none_match: str + """ + + _attribute_map = { + 'if_modified_since': {'key': 'ifModifiedSince', 'type': 'rfc-1123'}, + 'if_unmodified_since': {'key': 'ifUnmodifiedSince', 'type': 'rfc-1123'}, + 'if_match': {'key': 'ifMatch', 'type': 'str'}, + 'if_none_match': {'key': 'ifNoneMatch', 'type': 'str'}, + } + + def __init__( + self, + *, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + **kwargs + ): + super(ModifiedAccessConditions, self).__init__(**kwargs) + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since + self.if_match = if_match + self.if_none_match = if_none_match + + +class Path(msrest.serialization.Model): + """Path. + + :param name: + :type name: str + :param is_directory: + :type is_directory: bool + :param last_modified: + :type last_modified: str + :param e_tag: + :type e_tag: str + :param content_length: + :type content_length: long + :param owner: + :type owner: str + :param group: + :type group: str + :param permissions: + :type permissions: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, + 'last_modified': {'key': 'lastModified', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, + 'content_length': {'key': 'contentLength', 'type': 'long'}, + 'owner': {'key': 'owner', 'type': 'str'}, + 'group': {'key': 'group', 'type': 'str'}, + 'permissions': {'key': 'permissions', 'type': 'str'}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + is_directory: Optional[bool] = False, + last_modified: Optional[str] = None, + e_tag: Optional[str] = None, + content_length: Optional[int] = None, + owner: Optional[str] = None, + group: Optional[str] = None, + permissions: Optional[str] = None, + **kwargs + ): + super(Path, self).__init__(**kwargs) + self.name = name + self.is_directory = is_directory + self.last_modified = last_modified + self.e_tag = e_tag + self.content_length = content_length + self.owner = owner + self.group = group + self.permissions = permissions + + +class PathHTTPHeaders(msrest.serialization.Model): + """Parameter group. + + :param cache_control: Optional. Sets the blob's cache control. If specified, this property is + stored with the blob and returned with a read request. + :type cache_control: str + :param content_encoding: Optional. Sets the blob's content encoding. If specified, this + property is stored with the blob and returned with a read request. + :type content_encoding: str + :param content_language: Optional. Set the blob's content language. If specified, this property + is stored with the blob and returned with a read request. + :type content_language: str + :param content_disposition: Optional. Sets the blob's Content-Disposition header. 
+ :type content_disposition: str + :param content_type: Optional. Sets the blob's content type. If specified, this property is + stored with the blob and returned with a read request. + :type content_type: str + :param content_md5: Specify the transactional md5 for the body, to be validated by the service. + :type content_md5: bytearray + :param transactional_content_hash: Specify the transactional md5 for the body, to be validated + by the service. + :type transactional_content_hash: bytearray + """ + + _attribute_map = { + 'cache_control': {'key': 'cacheControl', 'type': 'str'}, + 'content_encoding': {'key': 'contentEncoding', 'type': 'str'}, + 'content_language': {'key': 'contentLanguage', 'type': 'str'}, + 'content_disposition': {'key': 'contentDisposition', 'type': 'str'}, + 'content_type': {'key': 'contentType', 'type': 'str'}, + 'content_md5': {'key': 'contentMD5', 'type': 'bytearray'}, + 'transactional_content_hash': {'key': 'transactionalContentHash', 'type': 'bytearray'}, + } + + def __init__( + self, + *, + cache_control: Optional[str] = None, + content_encoding: Optional[str] = None, + content_language: Optional[str] = None, + content_disposition: Optional[str] = None, + content_type: Optional[str] = None, + content_md5: Optional[bytearray] = None, + transactional_content_hash: Optional[bytearray] = None, + **kwargs + ): + super(PathHTTPHeaders, self).__init__(**kwargs) + self.cache_control = cache_control + self.content_encoding = content_encoding + self.content_language = content_language + self.content_disposition = content_disposition + self.content_type = content_type + self.content_md5 = content_md5 + self.transactional_content_hash = transactional_content_hash + + +class PathList(msrest.serialization.Model): + """PathList. + + :param paths: + :type paths: list[~azure.storage.filedatalake.models.Path] + """ + + _attribute_map = { + 'paths': {'key': 'paths', 'type': '[Path]'}, + } + + def __init__( + self, + *, + paths: Optional[List["Path"]] = None, + **kwargs + ): + super(PathList, self).__init__(**kwargs) + self.paths = paths + + +class SetAccessControlRecursiveResponse(msrest.serialization.Model): + """SetAccessControlRecursiveResponse. + + :param directories_successful: + :type directories_successful: int + :param files_successful: + :type files_successful: int + :param failure_count: + :type failure_count: int + :param failed_entries: + :type failed_entries: list[~azure.storage.filedatalake.models.AclFailedEntry] + """ + + _attribute_map = { + 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, + 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, + 'failure_count': {'key': 'failureCount', 'type': 'int'}, + 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, + } + + def __init__( + self, + *, + directories_successful: Optional[int] = None, + files_successful: Optional[int] = None, + failure_count: Optional[int] = None, + failed_entries: Optional[List["AclFailedEntry"]] = None, + **kwargs + ): + super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) + self.directories_successful = directories_successful + self.files_successful = files_successful + self.failure_count = failure_count + self.failed_entries = failed_entries + + +class SourceModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param source_if_match: Specify an ETag value to operate only on blobs with a matching value. 
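+     For example, passing an ETag captured when the source was last read makes a
+     subsequent rename fail if the source has changed in the meantime.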
+ :type source_if_match: str + :param source_if_none_match: Specify an ETag value to operate only on blobs without a matching + value. + :type source_if_none_match: str + :param source_if_modified_since: Specify this header value to operate only on a blob if it has + been modified since the specified date/time. + :type source_if_modified_since: ~datetime.datetime + :param source_if_unmodified_since: Specify this header value to operate only on a blob if it + has not been modified since the specified date/time. + :type source_if_unmodified_since: ~datetime.datetime + """ + + _attribute_map = { + 'source_if_match': {'key': 'sourceIfMatch', 'type': 'str'}, + 'source_if_none_match': {'key': 'sourceIfNoneMatch', 'type': 'str'}, + 'source_if_modified_since': {'key': 'sourceIfModifiedSince', 'type': 'rfc-1123'}, + 'source_if_unmodified_since': {'key': 'sourceIfUnmodifiedSince', 'type': 'rfc-1123'}, + } + + def __init__( + self, + *, + source_if_match: Optional[str] = None, + source_if_none_match: Optional[str] = None, + source_if_modified_since: Optional[datetime.datetime] = None, + source_if_unmodified_since: Optional[datetime.datetime] = None, + **kwargs + ): + super(SourceModifiedAccessConditions, self).__init__(**kwargs) + self.source_if_match = source_if_match + self.source_if_none_match = source_if_none_match + self.source_if_modified_since = source_if_modified_since + self.source_if_unmodified_since = source_if_unmodified_since + + +class StorageError(msrest.serialization.Model): + """StorageError. + + :param error: The service error response object. + :type error: ~azure.storage.filedatalake.models.StorageErrorError + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'StorageErrorError'}, + } + + def __init__( + self, + *, + error: Optional["StorageErrorError"] = None, + **kwargs + ): + super(StorageError, self).__init__(**kwargs) + self.error = error + + +class StorageErrorError(msrest.serialization.Model): + """The service error response object. + + :param code: The service error code. + :type code: str + :param message: The service error message. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'Code', 'type': 'str'}, + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__( + self, + *, + code: Optional[str] = None, + message: Optional[str] = None, + **kwargs + ): + super(StorageErrorError, self).__init__(**kwargs) + self.code = code + self.message = message diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/__init__.py new file mode 100644 index 0000000..0db71e0 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/__init__.py @@ -0,0 +1,17 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._file_system_operations import FileSystemOperations +from ._path_operations import PathOperations + +__all__ = [ + 'ServiceOperations', + 'FileSystemOperations', + 'PathOperations', +] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_file_system_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_file_system_operations.py new file mode 100644 index 0000000..991890a --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_file_system_operations.py @@ -0,0 +1,643 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class FileSystemOperations(object): + """FileSystemOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.filedatalake.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create( + self, + request_id_parameter=None, # type: Optional[str] + timeout=None, # type: Optional[int] + properties=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Create FileSystem. + + Create a FileSystem rooted at the specified location. If the FileSystem already exists, the + operation fails. This operation does not support conditional HTTP requests. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param properties: Optional. 
User-defined properties to be stored with the filesystem, in the + format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value + is a base64 encoded string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties not included in the list + will be removed. All properties are removed if the header is omitted. To merge new and + existing properties, first get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all properties. + :type properties: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/json" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{filesystem}'} # type: ignore + + def set_properties( + self, + request_id_parameter=None, # type: Optional[str] + timeout=None, # type: Optional[int] + properties=None, # 
type: Optional[str] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Set FileSystem Properties. + + Set properties for the FileSystem. This operation supports conditional HTTP requests. For + more information, see `Specifying Conditional Headers for Blob Service Operations + `_. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param properties: Optional. User-defined properties to be stored with the filesystem, in the + format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value + is a base64 encoded string. Note that the string may only contain ASCII characters in the + ISO-8859-1 character set. If the filesystem exists, any properties not included in the list + will be removed. All properties are removed if the header is omitted. To merge new and + existing properties, first get all existing properties and the current E-Tag, then make a + conditional request with the E-Tag and include values for all properties. + :type properties: str + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.set_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = 
self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {'url': '/{filesystem}'} # type: ignore + + def get_properties( + self, + request_id_parameter=None, # type: Optional[str] + timeout=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Get FileSystem Properties. + + All system and user-defined filesystem properties are specified in the response headers. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. 
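+         For example, ``timeout=30`` asks the service to fail the request if it
+         cannot be completed within 30 seconds.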
+ :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/json" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) + response_headers['x-ms-namespace-enabled']=self._deserialize('str', response.headers.get('x-ms-namespace-enabled')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{filesystem}'} # type: ignore + + def delete( + self, + request_id_parameter=None, # type: Optional[str] + timeout=None, # type: Optional[int] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Delete FileSystem. + + Marks the FileSystem for deletion. When a FileSystem is deleted, a FileSystem with the same + identifier cannot be created for at least 30 seconds. While the filesystem is being deleted, + attempts to create a filesystem with the same identifier will fail with status code 409 + (Conflict), with the service returning additional error information indicating that the + filesystem is being deleted. 
All other operations, including operations on any files or + directories within the filesystem, will fail with status code 404 (Not Found) while the + filesystem is being deleted. This operation supports conditional HTTP requests. For more + information, see `Specifying Conditional Headers for Blob Service Operations + `_. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', 
response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    delete.metadata = {'url': '/{filesystem}'}  # type: ignore
+
+    def list_paths(
+        self,
+        recursive,  # type: bool
+        request_id_parameter=None,  # type: Optional[str]
+        timeout=None,  # type: Optional[int]
+        continuation=None,  # type: Optional[str]
+        path=None,  # type: Optional[str]
+        max_results=None,  # type: Optional[int]
+        upn=None,  # type: Optional[bool]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> Iterable["_models.PathList"]
+        """List Paths.
+
+        List FileSystem paths and their properties.
+
+        :param recursive: Required.
+        :type recursive: bool
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param continuation: Optional. The number of paths returned with each invocation is limited.
+         If the number of paths to be returned exceeds this limit, a continuation token is returned in
+         the response header x-ms-continuation. When a continuation token is returned in the response,
+         it must be specified in a subsequent invocation of the list operation to continue listing the
+         paths.
+        :type continuation: str
+        :param path: Optional. Filters results to paths within the specified directory. An error
+         occurs if the directory does not exist.
+        :type path: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items.
+        :type max_results: int
+        :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+         "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+         headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If
+         "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+         false. Note that group and application Object IDs are not translated because they do not have
+         unique friendly names.
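+         For example, with ``upn=True`` an owner may be returned as
+         ``someone@contoso.com`` rather than as an Azure Active Directory Object ID
+         (a GUID).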
+        :type upn: bool
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator like instance of either PathList or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.PathList]
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PathList"]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        accept = "application/json"
+
+        # TODO: change this once continuation/next_link autorest PR is merged
+        def prepare_request(next_link=None, cont_token=None):
+            # Construct headers
+            header_parameters = {}  # type: Dict[str, Any]
+            if request_id_parameter is not None:
+                header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter",
+                                                                                     request_id_parameter, 'str')
+            header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version,
+                                                                       'str')
+            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+            if not next_link:
+                # Construct URL
+                url = self.list_paths.metadata['url']  # type: ignore
+                path_format_arguments = {
+                    'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+                }
+                url = self._client.format_url(url, **path_format_arguments)
+                # Construct parameters
+                query_parameters = {}  # type: Dict[str, Any]
+                query_parameters['resource'] = self._serialize.query("self._config.resource", self._config.resource,
+                                                                     'str')
+                if timeout is not None:
+                    query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+                # TODO: change this once continuation/next_link autorest PR is merged
+                if cont_token is not None:
+                    query_parameters['continuation'] = self._serialize.query("continuation", cont_token, 'str')
+                if path is not None:
+                    query_parameters['directory'] = self._serialize.query("path", path, 'str')
+                query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool')
+                if max_results is not None:
+                    query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1)
+                if upn is not None:
+                    query_parameters['upn'] = self._serialize.query("upn", upn, 'bool')
+
+                request = self._client.get(url, query_parameters, header_parameters)
+            else:
+                url = next_link
+                query_parameters = {}  # type: Dict[str, Any]
+                path_format_arguments = {
+                    'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+                }
+                url = self._client.format_url(url, **path_format_arguments)
+                request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        def extract_data(pipeline_response):
+            # TODO: change this once continuation/next_link autorest PR is merged
+            try:
+                cont_token = pipeline_response.http_response.headers['x-ms-continuation']
+            except KeyError:
+                cont_token = None
+            deserialized = self._deserialize('PathList', pipeline_response)
+            list_of_elem = deserialized.paths
+            if cls:
+                list_of_elem = cls(list_of_elem)
+            # TODO: change this once continuation/next_link autorest PR is merged
+            return cont_token, iter(list_of_elem)
+
+        # TODO: change this once continuation/next_link autorest PR is merged
+        def get_next(cont_token=None):
+            # Use the caller-supplied continuation token only for the first page;
+            # afterwards, follow the token returned in the x-ms-continuation header
+            # so that paging can make progress instead of re-requesting the same page.
+            if cont_token is None:
+                cont_token = continuation
+            request = prepare_request(cont_token=cont_token)
+
+            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response, model=error)
+
+            return pipeline_response
+
+        return ItemPaged(
+            get_next, extract_data
+        )
+
+    list_paths.metadata = {'url': '/{filesystem}'}  # type: ignore
+
+    def list_blob_hierarchy_segment(
+        self,
+        prefix=None,  # type: Optional[str]
+        delimiter=None,  # type: Optional[str]
+        marker=None,  # type: Optional[str]
+        max_results=None,  # type: Optional[int]
+        include=None,  # type: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]]
+        showonly="deleted",  # type: Optional[str]
+        timeout=None,  # type: Optional[int]
+        request_id_parameter=None,  # type: Optional[str]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> "_models.ListBlobsHierarchySegmentResponse"
+        """The List Blobs operation returns a list of the blobs under the specified container.
+
+        :param prefix: Filters results to paths whose names begin with the specified prefix.
+        :type prefix: str
+        :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix
+         element in the response body that acts as a placeholder for all blobs whose names begin with
+         the same substring up to the appearance of the delimiter character. The delimiter may be a
+         single character or a string.
+        :type delimiter: str
+        :param marker: A string value that identifies the portion of the results to be returned with
+         the next listing operation. The operation returns the NextMarker value within the response
+         body if the listing operation did not return all results remaining to be listed with the
+         current page. The NextMarker value can be used as the value for the marker parameter in a
+         subsequent call to request the next page of list items. The marker value is opaque to the
+         client.
+        :type marker: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items.
+        :type max_results: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response.
+        :type include: list[str or ~azure.storage.filedatalake.models.ListBlobsIncludeItem]
+        :param showonly: Optional. Filters results to show only deleted paths; "deleted" is the only
+         supported value.
+        :type showonly: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
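+         For example, passing ``str(uuid.uuid4())`` makes it easy to correlate a
+         single request between client-side and service-side logs.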
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsHierarchySegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.ListBlobsHierarchySegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListBlobsHierarchySegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "container" + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_blob_hierarchy_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if delimiter is not None: + query_parameters['delimiter'] = self._serialize.query("delimiter", delimiter, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if max_results is not None: + query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if showonly is not None: + query_parameters['showonly'] = self._serialize.query("showonly", showonly, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListBlobsHierarchySegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, 
response_headers) + + return deserialized + list_blob_hierarchy_segment.metadata = {'url': '/{filesystem}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_path_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_path_operations.py new file mode 100644 index 0000000..5517c96 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_path_operations.py @@ -0,0 +1,1789 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class PathOperations(object): + """PathOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.filedatalake.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create( + self, + request_id_parameter=None, # type: Optional[str] + timeout=None, # type: Optional[int] + resource=None, # type: Optional[Union[str, "_models.PathResourceType"]] + continuation=None, # type: Optional[str] + mode=None, # type: Optional[Union[str, "_models.PathRenameMode"]] + rename_source=None, # type: Optional[str] + source_lease_id=None, # type: Optional[str] + properties=None, # type: Optional[str] + permissions=None, # type: Optional[str] + umask=None, # type: Optional[str] + path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Create File | Create Directory | Rename File | Rename Directory. + + Create or rename a file or directory. By default, the destination is overwritten and if the + destination already exists and has a lease the lease is broken. This operation supports + conditional HTTP requests. 
For more information, see `Specifying Conditional Headers for Blob
+        Service Operations `_. To fail if the destination already exists,
+        use a conditional request with If-None-Match: "*".
+
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+        :type timeout: int
+        :param resource: Required only for Create File and Create Directory. The value must be "file"
+         or "directory".
+        :type resource: str or ~azure.storage.filedatalake.models.PathResourceType
+        :param continuation: Optional. When renaming a directory, the number of paths that are renamed
+         with each invocation is limited. If the number of paths to be renamed exceeds this limit, a
+         continuation token is returned in this response header. When a continuation token is returned
+         in the response, it must be specified in a subsequent invocation of the rename operation to
+         continue renaming the directory.
+        :type continuation: str
+        :param mode: Optional. Valid only when namespace is enabled. This parameter determines the
+         behavior of the rename operation. The value must be "legacy" or "posix", and the default value
+         will be "posix".
+        :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode
+        :param rename_source: An optional file or directory to be renamed. The value must have the
+         following format: "/{filesystem}/{path}". If "x-ms-properties" is specified, the properties
+         will overwrite the existing properties; otherwise, the existing properties will be preserved.
+         This value must be a URL percent-encoded string. Note that the string may only contain ASCII
+         characters in the ISO-8859-1 character set.
+        :type rename_source: str
+        :param source_lease_id: A lease ID for the source path. If specified, the source path must
+         have an active lease and the lease ID must match.
+        :type source_lease_id: str
+        :param properties: Optional. User-defined properties to be stored with the filesystem, in the
+         format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each value
+         is a base64 encoded string. Note that the string may only contain ASCII characters in the
+         ISO-8859-1 character set. If the filesystem exists, any properties not included in the list
+         will be removed. All properties are removed if the header is omitted. To merge new and
+         existing properties, first get all existing properties and the current E-Tag, then make a
+         conditional request with the E-Tag and include values for all properties.
+        :type properties: str
+        :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+         account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+         Each class may be granted read, write, or execute permission. The sticky bit is also
+         supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+        :type permissions: str
+        :param umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+         When creating a file or directory and the parent folder does not have a default ACL, the umask
+         restricts the permissions of the file or directory to be created. The resulting permission is
+         given by p bitwise and not u, where p is the permission and u is the umask.
For example, if p + is 0777 and u is 0057, then the resulting permission is 0720. The default permission is 0777 + for a directory and 0666 for a file. The default umask is 0027. The umask must be specified + in 4-digit octal notation (e.g. 0766). + :type umask: str + :param path_http_headers: Parameter group. + :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.filedatalake.models.SourceModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _cache_control = None + _content_encoding = None + _content_language = None + _content_disposition = None + _content_type = None + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_modified_since = None + _source_if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if path_http_headers is not None: + _cache_control = path_http_headers.cache_control + _content_encoding = path_http_headers.content_encoding + _content_language = path_http_headers.content_language + _content_disposition = path_http_headers.content_disposition + _content_type = path_http_headers.content_type + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if resource is not None: + query_parameters['resource'] = self._serialize.query("resource", resource, 'str') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if mode is not None: + query_parameters['mode'] = self._serialize.query("mode", mode, 'str') + + # 
Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if rename_source is not None: + header_parameters['x-ms-rename-source'] = self._serialize.header("rename_source", rename_source, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if source_lease_id is not None: + header_parameters['x-ms-source-lease-id'] = self._serialize.header("source_lease_id", source_lease_id, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if umask is not None: + header_parameters['x-ms-umask'] = self._serialize.header("umask", umask, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if _source_if_match is not None: + header_parameters['x-ms-source-if-match'] = self._serialize.header("source_if_match", _source_if_match, 'str') + if _source_if_none_match is not None: + header_parameters['x-ms-source-if-none-match'] = self._serialize.header("source_if_none_match", _source_if_none_match, 'str') + if _source_if_modified_since is not None: + header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", _source_if_modified_since, 'rfc-1123') + if _source_if_unmodified_since is not None: + header_parameters['x-ms-source-if-unmodified-since'] = self._serialize.header("source_if_unmodified_since", _source_if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def update( + self, + action, # type: Union[str, "_models.PathUpdateAction"] + mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"] + body, # type: IO + request_id_parameter=None, # type: Optional[str] + timeout=None, # type: Optional[int] + max_records=None, # type: Optional[int] + continuation=None, # type: Optional[str] + force_flag=None, # type: Optional[bool] + position=None, # type: Optional[int] + retain_uncommitted_data=None, # type: Optional[bool] + close=None, # type: Optional[bool] + content_length=None, # type: Optional[int] + properties=None, # type: Optional[str] + owner=None, # type: Optional[str] + group=None, # type: Optional[str] + permissions=None, # type: Optional[str] + acl=None, # type: Optional[str] + path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> Optional["_models.SetAccessControlRecursiveResponse"] + """Append Data | Flush Data | Set Properties | Set Access Control. + + Uploads data to be appended to a file, flushes (writes) previously uploaded data to a file, + sets properties for a file or directory, or sets access control for a file or directory. Data + can only be appended to a file. Concurrent writes to the same file using multiple clients are + not supported. This operation supports conditional HTTP requests. For more information, see + `Specifying Conditional Headers for Blob Service Operations `_. + + :param action: The action must be "append" to upload data to be appended to a file, "flush" to + flush previously uploaded data to a file, "setProperties" to set the properties of a file or + directory, "setAccessControl" to set the owner, group, permissions, or access control list for + a file or directory, or "setAccessControlRecursive" to set the access control list for a + directory recursively. Note that Hierarchical Namespace must be enabled for the account in + order to use access control. Also note that the Access Control List (ACL) includes permissions + for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers + are mutually exclusive. 
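+
+         As a rough sketch, assuming ``ops`` is this operations instance, ``stream``
+         is a binary file-like object, and ``length`` is its size in bytes (``mode``
+         is required by this generated signature even for append/flush; applications
+         normally use the higher-level file client instead)::
+
+             ops.update(action="append", mode="set", body=stream,
+                        position=0, content_length=length)
+             ops.update(action="flush", mode="set", body=b"",
+                        position=length, content_length=0)
+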
+ :type action: str or ~azure.storage.filedatalake.models.PathUpdateAction + :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify" + modifies one or more POSIX access control rights that pre-exist on files and directories, + "remove" removes one or more POSIX access control rights that were present earlier on files + and directories. + :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode + :param body: Initial data. + :type body: IO + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param max_records: Optional. Valid for "SetAccessControlRecursive" operation. It specifies the + maximum number of files or directories on which the acl change will be applied. If omitted or + greater than 2,000, the request will process up to 2,000 items. + :type max_records: int + :param continuation: Optional. The number of paths processed with each invocation is limited. + If the number of paths to be processed exceeds this limit, a continuation token is returned in + the response header x-ms-continuation. When a continuation token is returned in the response, + it must be percent-encoded and specified in a subsequent invocation of setAcessControlRecursive + operation. + :type continuation: str + :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false, + the operation will terminate quickly on encountering user errors (4XX). If true, the operation + will ignore user errors and proceed with the operation on other sub-entities of the directory. + Continuation token will only be returned when forceFlag is true in case of user errors. If not + set the default value is false for this. + :type force_flag: bool + :param position: This parameter allows the caller to upload data in parallel and control the + order in which it is appended to the file. It is required when uploading data to be appended + to the file and when flushing previously uploaded data to the file. The value must be the + position where the data is to be appended. Uploaded data is not immediately flushed, or + written, to the file. To flush, the previously uploaded data must be contiguous, the position + parameter must be specified and equal to the length of the file after all data has been + written, and there must not be a request entity body included with the request. + :type position: long + :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data + is retained after the flush operation completes; otherwise, the uncommitted data is deleted + after the flush operation. The default is false. Data at offsets less than the specified + position are written to the file when flush succeeds, but this optional parameter allows data + after the flush position to be retained for a future flush operation. + :type retain_uncommitted_data: bool + :param close: Azure Storage Events allow applications to receive notifications when files + change. When Azure Storage Events are enabled, a file changed event is raised. 
This event has a
+ property indicating whether this is the final change, to distinguish between an
+ intermediate flush to a file stream and the final close of a file stream. The close query
+ parameter is valid only when the action is "flush" and change notifications are enabled. If the
+ value of close is "true" and the flush operation completes successfully, the service raises a
+ file change notification with a property indicating that this is the final update (the file
+ stream has been closed). If "false", a change notification is raised indicating the file has
+ changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+ indicate that the file stream has been closed.
+ :type close: bool
+ :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush
+ Data". Must be the length of the request content in bytes for "Append Data".
+ :type content_length: long
+ :param properties: Optional. User-defined properties to be stored with the file or directory,
+ in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", where each
+ value is a base64-encoded string. Note that the string may only contain ASCII characters in the
+ ISO-8859-1 character set. If the path exists, any properties not included in the list
+ will be removed. All properties are removed if the header is omitted. To merge new and
+ existing properties, first get all existing properties and the current E-Tag, then make a
+ conditional request with the E-Tag and include values for all properties.
+ :type properties: str
+ :param owner: Optional. The owner of the blob or directory.
+ :type owner: str
+ :param group: Optional. The owning group of the blob or directory.
+ :type group: str
+ :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the
+ account. Sets POSIX access permissions for the file owner, the file owning group, and others.
+ Each class may be granted read, write, or execute permission. The sticky bit is also
+ supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported.
+ :type permissions: str
+ :param acl: Sets POSIX access control rights on files and directories. The value is a comma-
+ separated list of access control entries. Each access control entry (ACE) consists of a scope,
+ a type, a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :param path_http_headers: Parameter group.
+ :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
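+ For example, a hedged sketch of a conditional update (``etag`` is a placeholder,
+ and ``_models`` is the generated models module used throughout this file)::
+
+ import io
+ # Only apply the new properties if the path is unchanged; "djE=" is
+ # base64 for "v1".
+ conditions = _models.ModifiedAccessConditions(if_match=etag)
+ client.update(action="setProperties", mode="set", body=io.BytesIO(b""),
+ properties="n1=djE=", modified_access_conditions=conditions)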
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SetAccessControlRecursiveResponse, or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse or None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SetAccessControlRecursiveResponse"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _content_md5 = None + _lease_id = None + _cache_control = None + _content_type = None + _content_disposition = None + _content_encoding = None + _content_language = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if path_http_headers is not None: + _content_md5 = path_http_headers.content_md5 + _cache_control = path_http_headers.cache_control + _content_type = path_http_headers.content_type + _content_disposition = path_http_headers.content_disposition + _content_encoding = path_http_headers.content_encoding + _content_language = path_http_headers.content_language + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/json" + + # Construct URL + url = self.update.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['action'] = self._serialize.query("action", action, 'str') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + query_parameters['mode'] = self._serialize.query("mode", mode, 'str') + if force_flag is not None: + query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if content_length is not None: + 
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if _content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if properties is not None: + header_parameters['x-ms-properties'] = self._serialize.header("properties", properties, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + deserialized = None + if response.status_code == 200: + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Cache-Control']=self._deserialize('str', 
response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) + response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) + + if response.status_code == 202: + response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + update.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def lease( + self, + x_ms_lease_action, # type: Union[str, "_models.PathLeaseAction"] + request_id_parameter=None, # type: Optional[str] + timeout=None, # type: Optional[int] + x_ms_lease_duration=None, # type: Optional[int] + x_ms_lease_break_period=None, # type: Optional[int] + proposed_lease_id=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Lease Path. + + Create and manage a lease to restrict write and delete access to the path. This operation + supports conditional HTTP requests. For more information, see `Specifying Conditional Headers + for Blob Service Operations `_. + + :param x_ms_lease_action: There are five lease actions: "acquire", "break", "change", "renew", + and "release". Use "acquire" and specify the "x-ms-proposed-lease-id" and "x-ms-lease-duration" + to acquire a new lease. Use "break" to break an existing lease. When a lease is broken, the + lease break period is allowed to elapse, during which time no lease operation except break and + release can be performed on the file. When a lease is successfully broken, the response + indicates the interval in seconds until a new lease can be acquired. Use "change" and specify + the current lease ID in "x-ms-lease-id" and the new lease ID in "x-ms-proposed-lease-id" to + change the lease ID of an active lease. Use "renew" and specify the "x-ms-lease-id" to renew an + existing lease. Use "release" and specify the "x-ms-lease-id" to release a lease. 
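+ For instance, acquiring an infinite lease could look like the following sketch
+ (``client`` is an assumed PathOperations instance, not generated code)::
+
+ import uuid
+ # -1 requests an infinite lease; finite durations must be 15-60 seconds.
+ client.lease(x_ms_lease_action="acquire", x_ms_lease_duration=-1,
+ proposed_lease_id=str(uuid.uuid4()))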
+ :type x_ms_lease_action: str or ~azure.storage.filedatalake.models.PathLeaseAction + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies + the duration of the lease in seconds. The lease duration must be between 15 and 60 seconds or + -1 for infinite lease. + :type x_ms_lease_duration: int + :param x_ms_lease_break_period: The lease break period duration is optional to break a lease, + and specifies the break period of the lease in seconds. The lease break duration must be + between 0 and 60 seconds. + :type x_ms_lease_break_period: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['x-ms-lease-action'] = self._serialize.header("x_ms_lease_action", x_ms_lease_action, 'str') + if x_ms_lease_duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("x_ms_lease_duration", 
x_ms_lease_duration, 'int') + if x_ms_lease_break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("x_ms_lease_break_period", x_ms_lease_break_period, 'int') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + + if response.status_code == 201: + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + + if response.status_code == 202: + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-lease-time']=self._deserialize('str', response.headers.get('x-ms-lease-time')) + + if cls: + return cls(pipeline_response, None, response_headers) + + lease.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def read( + self, + 
request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ range=None, # type: Optional[str]
+ x_ms_range_get_content_md5=None, # type: Optional[bool]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> IO
+ """Read File.
+
+ Read the contents of a file. For read operations, range requests are supported. This operation
+ supports conditional HTTP requests. For more information, see `Specifying Conditional Headers
+ for Blob Service Operations `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param range: The HTTP Range request header specifies one or more byte ranges of the resource
+ to be retrieved.
+ :type range: str
+ :param x_ms_range_get_content_md5: Optional. When this header is set to "true" and specified
+ together with the Range header, the service returns the MD5 hash for the range, as long as the
+ range is less than or equal to 4 MB in size. If this header is specified without the Range
+ header, the service returns status code 400 (Bad Request). If this header is set to true when
+ the range exceeds 4 MB in size, the service returns status code 400 (Bad Request).
+ :type x_ms_range_get_content_md5: bool
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
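+ For example, a hedged sketch of a ranged, conditional read (``etag`` and
+ ``client`` are placeholders)::
+
+ stream = client.read(range="bytes=0-1023",
+ modified_access_conditions=_models.ModifiedAccessConditions(if_none_match=etag))
+ # The body is streamed; joining the chunks is one way to materialize it.
+ data = b"".join(stream)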
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.read.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['Range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if x_ms_range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("x_ms_range_get_content_md5", x_ms_range_get_content_md5, 'bool') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + 
response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) + response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) + response_headers['x-ms-content-md5']=self._deserialize('str', response.headers.get('x-ms-content-md5')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) + 
response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties'))
+ response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration'))
+ response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state'))
+ response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status'))
+ deserialized = response.stream_download(self._client._pipeline)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ read.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
+
+ def get_properties(
+ self,
+ request_id_parameter=None, # type: Optional[str]
+ timeout=None, # type: Optional[int]
+ action=None, # type: Optional[Union[str, "_models.PathGetPropertiesAction"]]
+ upn=None, # type: Optional[bool]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Get Properties | Get Status | Get Access Control List.
+
+ Get Properties returns all system-defined and user-defined properties for a path. Get Status
+ returns all system-defined properties for a path. Get Access Control List returns the access
+ control list for a path. This operation supports conditional HTTP requests. For more
+ information, see `Specifying Conditional Headers for Blob Service Operations `_.
+
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param action: Optional. If the value is "getStatus", only the system-defined properties for
+ the path are returned. If the value is "getAccessControl", the access control list is returned
+ in the response headers (Hierarchical Namespace must be enabled for the account); otherwise the
+ properties are returned.
+ :type action: str or ~azure.storage.filedatalake.models.PathGetPropertiesAction
+ :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. If
+ "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response
+ headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If
+ "false", the values will be returned as Azure Active Directory Object IDs. The default value is
+ false. Note that group and application Object IDs are not translated because they do not have
+ unique friendly names.
+ :type upn: bool
+ :param lease_access_conditions: Parameter group.
+ :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions
+ :param modified_access_conditions: Parameter group.
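+ For example, a sketch that surfaces the raw response headers through the
+ ``cls`` callback (``client`` is an assumption; the header names follow the
+ deserialization code below)::
+
+ headers = client.get_properties(
+ action="getAccessControl", upn=True,
+ cls=lambda pipeline_response, deserialized, response_headers: response_headers)
+ acl = headers.get('x-ms-acl')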
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if action is not None: + query_parameters['action'] = self._serialize.query("action", action, 'str') + if upn is not None: + query_parameters['upn'] = self._serialize.query("upn", upn, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + 
response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-MD5']=self._deserialize('str', response.headers.get('Content-MD5')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) + response_headers['x-ms-properties']=self._deserialize('str', response.headers.get('x-ms-properties')) + response_headers['x-ms-owner']=self._deserialize('str', response.headers.get('x-ms-owner')) + response_headers['x-ms-group']=self._deserialize('str', response.headers.get('x-ms-group')) + response_headers['x-ms-permissions']=self._deserialize('str', response.headers.get('x-ms-permissions')) + response_headers['x-ms-acl']=self._deserialize('str', response.headers.get('x-ms-acl')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def delete( + self, + request_id_parameter=None, # type: Optional[str] + timeout=None, # type: Optional[int] + recursive=None, # type: Optional[bool] + continuation=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Delete File | Delete Directory. + + Delete the file or directory. This operation supports conditional HTTP requests. For more + information, see `Specifying Conditional Headers for Blob Service Operations + `_. + + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param recursive: Required. + :type recursive: bool + :param continuation: Optional. When deleting a directory, the number of paths that are deleted + with each invocation is limited. 
If the number of paths to be deleted exceeds this limit, a + continuation token is returned in this response header. When a continuation token is returned + in the response, it must be specified in a subsequent invocation of the delete operation to + continue deleting the directory. + :type continuation: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + accept = "application/json" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if recursive is not None: + query_parameters['recursive'] = self._serialize.query("recursive", recursive, 'bool') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['x-ms-deletion-id']=self._deserialize('str', response.headers.get('x-ms-deletion-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def set_access_control( + self, + timeout=None, # type: Optional[int] + owner=None, # type: Optional[str] + group=None, # type: Optional[str] + permissions=None, # type: Optional[str] + acl=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Set the owner, group, permissions, or access control list for a path. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param owner: Optional. The owner of the blob or directory. + :type owner: str + :param group: Optional. The owning group of the blob or directory. + :type group: str + :param permissions: Optional and only valid if Hierarchical Namespace is enabled for the + account. Sets POSIX access permissions for the file owner, the file owning group, and others. + Each class may be granted read, write, or execute permission. The sticky bit is also + supported. Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. + :type permissions: str + :param acl: Sets POSIX access control rights on files and directories. The value is a comma- + separated list of access control entries. Each access control entry (ACE) consists of a scope, + a type, a user or group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. 
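+ For example, a hedged sketch that guards the change on the path being
+ unmodified (``last_seen`` is a placeholder datetime, ``client`` an assumed
+ PathOperations instance)::
+
+ client.set_access_control(permissions="0750",
+ modified_access_conditions=_models.ModifiedAccessConditions(
+ if_unmodified_since=last_seen))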
+ :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + action = "setAccessControl" + accept = "application/json" + + # Construct URL + url = self.set_access_control.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if owner is not None: + header_parameters['x-ms-owner'] = self._serialize.header("owner", owner, 'str') + if group is not None: + header_parameters['x-ms-group'] = self._serialize.header("group", group, 'str') + if permissions is not None: + header_parameters['x-ms-permissions'] = self._serialize.header("permissions", permissions, 'str') + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+
+ if cls:
+ return cls(pipeline_response, None, response_headers)
+
+ set_access_control.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
+
+ def set_access_control_recursive(
+ self,
+ mode, # type: Union[str, "_models.PathSetAccessControlRecursiveMode"]
+ timeout=None, # type: Optional[int]
+ continuation=None, # type: Optional[str]
+ force_flag=None, # type: Optional[bool]
+ max_records=None, # type: Optional[int]
+ acl=None, # type: Optional[str]
+ request_id_parameter=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> "_models.SetAccessControlRecursiveResponse"
+ """Set the access control list for a path and subpaths.
+
+ :param mode: Mode "set" sets POSIX access control rights on files and directories, "modify"
+ modifies one or more POSIX access control rights that pre-exist on files and directories,
+ "remove" removes one or more POSIX access control rights that were present earlier on files
+ and directories.
+ :type mode: str or ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param continuation: Optional. The number of paths processed with each invocation is limited.
+ If the number of paths to be processed exceeds this limit, a continuation token is returned in
+ the response header x-ms-continuation. When a continuation token is returned in the response,
+ it must be percent-encoded and specified in a subsequent invocation of the
+ setAccessControlRecursive operation.
+ :type continuation: str
+ :param force_flag: Optional. Valid for "SetAccessControlRecursive" operation. If set to false,
+ the operation will terminate quickly on encountering user errors (4XX). If true, the operation
+ will ignore user errors and proceed with the operation on other sub-entities of the directory.
+ A continuation token will only be returned when forceFlag is true in case of user errors. If
+ not set, the default value is false.
+ :type force_flag: bool
+ :param max_records: Optional. It specifies the maximum number of files or directories on which
+ the acl change will be applied. If omitted or greater than 2,000, the request will process up
+ to 2,000 items.
+ :type max_records: int
+ :param acl: Sets POSIX access control rights on files and directories. The value is a comma-
+ separated list of access control entries. Each access control entry (ACE) consists of a scope,
+ a type, a user or group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
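+ For example, ``"user::rwx,group::r-x,other::---"`` grants full access to the
+ owning user only. A hedged usage sketch (``client`` is an assumption, and the
+ result fields assume the generated SetAccessControlRecursiveResponse model)::
+
+ result = client.set_access_control_recursive(
+ mode="set", acl="user::rwx,group::r-x,other::---", max_records=1000)
+ print(result.files_successful, result.directories_successful)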
+ :type acl: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SetAccessControlRecursiveResponse, or the result of cls(response) + :rtype: ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.SetAccessControlRecursiveResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + action = "setAccessControlRecursive" + accept = "application/json" + + # Construct URL + url = self.set_access_control_recursive.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + query_parameters['mode'] = self._serialize.query("mode", mode, 'str') + if force_flag is not None: + query_parameters['forceFlag'] = self._serialize.query("force_flag", force_flag, 'bool') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('SetAccessControlRecursiveResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, 
response_headers)
+
+ return deserialized
+ set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} # type: ignore
+
+ def flush_data(
+ self,
+ timeout=None, # type: Optional[int]
+ position=None, # type: Optional[int]
+ retain_uncommitted_data=None, # type: Optional[bool]
+ close=None, # type: Optional[bool]
+ content_length=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"]
+ lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
+ modified_access_conditions=None, # type: Optional["_models.ModifiedAccessConditions"]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Flush previously uploaded data to a file.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int
+ :param position: This parameter allows the caller to upload data in parallel and control the
+ order in which it is appended to the file. It is required when uploading data to be appended
+ to the file and when flushing previously uploaded data to the file. The value must be the
+ position where the data is to be appended. Uploaded data is not immediately flushed, or
+ written, to the file. To flush, the previously uploaded data must be contiguous, the position
+ parameter must be specified and equal to the length of the file after all data has been
+ written, and there must not be a request entity body included with the request.
+ :type position: long
+ :param retain_uncommitted_data: Valid only for flush operations. If "true", uncommitted data
+ is retained after the flush operation completes; otherwise, the uncommitted data is deleted
+ after the flush operation. The default is false. Data at offsets less than the specified
+ position are written to the file when flush succeeds, but this optional parameter allows data
+ after the flush position to be retained for a future flush operation.
+ :type retain_uncommitted_data: bool
+ :param close: Azure Storage Events allow applications to receive notifications when files
+ change. When Azure Storage Events are enabled, a file changed event is raised. This event has a
+ property indicating whether this is the final change, to distinguish between an
+ intermediate flush to a file stream and the final close of a file stream. The close query
+ parameter is valid only when the action is "flush" and change notifications are enabled. If the
+ value of close is "true" and the flush operation completes successfully, the service raises a
+ file change notification with a property indicating that this is the final update (the file
+ stream has been closed). If "false", a change notification is raised indicating the file has
+ changed. The default is false. This query parameter is set to true by the Hadoop ABFS driver to
+ indicate that the file stream has been closed.
+ :type close: bool
+ :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush
+ Data". Must be the length of the request content in bytes for "Append Data".
+ :type content_length: long
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param path_http_headers: Parameter group.
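+ For example, a hedged sketch that sets the final Content-Type at flush time
+ (``total_length`` is a placeholder for the number of bytes appended so far,
+ and ``client`` an assumed PathOperations instance)::
+
+ client.flush_data(position=total_length, content_length=0, close=True,
+ path_http_headers=_models.PathHTTPHeaders(content_type="application/json"))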
+ :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. + :type modified_access_conditions: ~azure.storage.filedatalake.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _content_md5 = None + _lease_id = None + _cache_control = None + _content_type = None + _content_disposition = None + _content_encoding = None + _content_language = None + _if_match = None + _if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_none_match = modified_access_conditions.if_none_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if path_http_headers is not None: + _content_md5 = path_http_headers.content_md5 + _cache_control = path_http_headers.cache_control + _content_type = path_http_headers.content_type + _content_disposition = path_http_headers.content_disposition + _content_encoding = path_http_headers.content_encoding + _content_language = path_http_headers.content_language + action = "flush" + accept = "application/json" + + # Construct URL + url = self.flush_data.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if retain_uncommitted_data is not None: + query_parameters['retainUncommittedData'] = self._serialize.query("retain_uncommitted_data", retain_uncommitted_data, 'bool') + if close is not None: + query_parameters['close'] = self._serialize.query("close", close, 'bool') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if _content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("content_md5", _content_md5, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if _cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("cache_control", _cache_control, 'str') + if _content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("content_type", _content_type, 'str') + if _content_disposition is 
not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("content_disposition", _content_disposition, 'str') + if _content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("content_encoding", _content_encoding, 'str') + if _content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("content_language", _content_language, 'str') + if _if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') + if _if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if _if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", _if_modified_since, 'rfc-1123') + if _if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", _if_unmodified_since, 'rfc-1123') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + flush_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def append_data( + self, + body, # type: IO + position=None, # type: Optional[int] + timeout=None, # type: Optional[int] + content_length=None, # type: Optional[int] + transactional_content_crc64=None, # type: Optional[bytearray] + request_id_parameter=None, # type: Optional[str] + path_http_headers=None, # type: Optional["_models.PathHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Append data to the file. + + :param body: Initial data. + :type body: IO + :param position: This parameter allows the caller to upload data in parallel and control the + order in which it is appended to the file. It is required when uploading data to be appended + to the file and when flushing previously uploaded data to the file. 
The value must be the + position where the data is to be appended. Uploaded data is not immediately flushed, or + written, to the file. To flush, the previously uploaded data must be contiguous, the position + parameter must be specified and equal to the length of the file after all data has been + written, and there must not be a request entity body included with the request. + :type position: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param content_length: Required for "Append Data" and "Flush Data". Must be 0 for "Flush + Data". Must be the length of the request content in bytes for "Append Data". + :type content_length: long + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. + :type transactional_content_crc64: bytearray + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param path_http_headers: Parameter group. + :type path_http_headers: ~azure.storage.filedatalake.models.PathHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.filedatalake.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _transactional_content_hash = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if path_http_headers is not None: + _transactional_content_hash = path_http_headers.transactional_content_hash + action = "append" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/json" + + # Construct URL + url = self.append_data.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['action'] = self._serialize.query("action", action, 'str') + if position is not None: + query_parameters['position'] = self._serialize.query("position", position, 'long') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if content_length is not None: + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) + if _transactional_content_hash is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_hash", _transactional_content_hash, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", 
_lease_id, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = body + request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, response_headers) + + append_data.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def set_expiry( + self, + expiry_options, # type: Union[str, "_models.PathExpiryOptions"] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + expires_on=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Sets the time a blob will expire and be deleted. + + :param expiry_options: Required. Indicates mode of the expiry time. + :type expiry_options: str or ~azure.storage.filedatalake.models.PathExpiryOptions + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param expires_on: The time to set the blob to expiry. 
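+         How the value is interpreted depends on ``expiry_options``: an absolute expiry is
+         given as an RFC 1123 datetime, while a relative expiry is given as a number of
+         milliseconds (based on the Blob service Set Blob Expiry operation that backs this
+         API; the exact formats are an assumption, not verified here).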
+ :type expires_on: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "expiry" + accept = "application/json" + + # Construct URL + url = self.set_expiry.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') + if expires_on is not None: + header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_expiry.metadata = {'url': '/{filesystem}/{path}'} # type: ignore + + def undelete( + self, + timeout=None, # type: Optional[int] + undelete_source=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Undelete a path that was previously soft deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting Timeouts for Blob Service Operations.`. + :type timeout: int + :param undelete_source: Only for hierarchical namespace enabled accounts. Optional. The path of + the soft deleted blob to undelete. 
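+         Typically this is the deleted path suffixed with a ``?deletionid=<id>`` query, where
+         the deletion id is obtained by listing deleted paths (an assumption about the service
+         convention; the value is passed through unmodified by this client).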
+ :type undelete_source: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "undelete" + accept = "application/json" + + # Construct URL + url = self.undelete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if undelete_source is not None: + header_parameters['x-ms-undelete-source'] = self._serialize.header("undelete_source", undelete_source, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-resource-type']=self._deserialize('str', response.headers.get('x-ms-resource-type')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + undelete.metadata = {'url': '/{filesystem}/{path}'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_service_operations.py new file mode 100644 index 0000000..2db3801 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_generated/operations/_service_operations.py @@ -0,0 +1,153 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+
+from .. import models as _models
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
+
+    T = TypeVar('T')
+    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class ServiceOperations(object):
+    """ServiceOperations operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~azure.storage.filedatalake.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = _models
+
+    def __init__(self, client, config, serializer, deserializer):
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def list_file_systems(
+        self,
+        prefix=None,  # type: Optional[str]
+        continuation=None,  # type: Optional[str]
+        max_results=None,  # type: Optional[int]
+        request_id_parameter=None,  # type: Optional[str]
+        timeout=None,  # type: Optional[int]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> Iterable["_models.FileSystemList"]
+        """List FileSystems.
+
+        List filesystems and their properties in the given account.
+
+        :param prefix: Filters results to filesystems within the specified prefix.
+        :type prefix: str
+        :param continuation: Optional. When the number of filesystems to be listed exceeds the
+         maximum allowed per response, a continuation token is returned in the response. Specify the
+         token in a subsequent invocation of the list operation to continue listing filesystems from
+         where the previous invocation left off.
+        :type continuation: str
+        :param max_results: An optional value that specifies the maximum number of items to return. If
+         omitted or greater than 5,000, the response will include up to 5,000 items.
+        :type max_results: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+        :type request_id_parameter: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations.`.
+ :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either FileSystemList or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.models.FileSystemList] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.FileSystemList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + resource = "account" + accept = "application/json" + + def prepare_request(next_link=None): + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + if not next_link: + # Construct URL + url = self.list_file_systems.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['resource'] = self._serialize.query("resource", resource, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + if max_results is not None: + query_parameters['maxResults'] = self._serialize.query("max_results", max_results, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + request = self._client.get(url, query_parameters, header_parameters) + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('FileSystemList', pipeline_response) + list_of_elem = deserialized.filesystems + if cls: + list_of_elem = cls(list_of_elem) + return None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list_file_systems.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_list_paths_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_list_paths_helper.py new file mode 100644 index 0000000..543e1e1 --- /dev/null +++ 
b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_list_paths_helper.py @@ -0,0 +1,108 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from azure.core.paging import PageIterator +from azure.core.exceptions import HttpResponseError +from ._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code +from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix +from ._shared.models import DictMixin +from ._shared.response_handlers import return_context_and_deserialized + + +class DeletedPathPropertiesPaged(PageIterator): + """An Iterable of deleted path properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A path name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties) + :ivar str container: The container that the paths are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + delimiter=None, + location_mode=None): + super(DeletedPathPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.delimiter = delimiter + self.current_page = None + self.location_mode = location_mode + + def _get_next_cb(self, continuation_token): + try: + return self._command( + prefix=self.prefix, + marker=continuation_token or None, + max_results=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.container = self._response.container_name + self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items + self.current_page = [self._build_item(item) for item in self.current_page] + self.delimiter = self._response.delimiter + + return self._response.next_marker or None, self.current_page + + def _build_item(self, item): + if isinstance(item, BlobItemInternal): + file_props = get_deleted_path_properties_from_generated_code(item) + file_props.file_system = self.container + return file_props + if isinstance(item, GenBlobPrefix): + return 
DirectoryPrefix( + container=self.container, + prefix=item.name, + results_per_page=self.results_per_page, + location_mode=self.location_mode) + return item + + +class DirectoryPrefix(DictMixin): + """Directory prefix. + + :ivar str name: Name of the deleted directory. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar str file_system: The file system that the deleted paths are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + """ + def __init__(self, **kwargs): + self.name = kwargs.get('prefix') + self.results_per_page = kwargs.get('results_per_page') + self.file_system = kwargs.get('container') + self.delimiter = kwargs.get('delimiter') + self.location_mode = kwargs.get('location_mode') diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_models.py new file mode 100644 index 0000000..4517f46 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_models.py @@ -0,0 +1,1036 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines +from datetime import datetime +from enum import Enum + +from azure.multiapi.storagev2.blob.v2020_06_12 import LeaseProperties as BlobLeaseProperties +from azure.multiapi.storagev2.blob.v2020_06_12 import AccountSasPermissions as BlobAccountSasPermissions +from azure.multiapi.storagev2.blob.v2020_06_12 import ResourceTypes as BlobResourceTypes +from azure.multiapi.storagev2.blob.v2020_06_12 import UserDelegationKey as BlobUserDelegationKey +from azure.multiapi.storagev2.blob.v2020_06_12 import ContentSettings as BlobContentSettings +from azure.multiapi.storagev2.blob.v2020_06_12 import AccessPolicy as BlobAccessPolicy +from azure.multiapi.storagev2.blob.v2020_06_12 import DelimitedTextDialect as BlobDelimitedTextDialect +from azure.multiapi.storagev2.blob.v2020_06_12 import DelimitedJsonDialect as BlobDelimitedJSON +from azure.multiapi.storagev2.blob.v2020_06_12 import ArrowDialect as BlobArrowDialect +from azure.multiapi.storagev2.blob.v2020_06_12._models import ContainerPropertiesPaged +from azure.multiapi.storagev2.blob.v2020_06_12._generated.models import Logging as GenLogging, Metrics as GenMetrics, \ + RetentionPolicy as GenRetentionPolicy, StaticWebsite as GenStaticWebsite, CorsRule as GenCorsRule +from ._shared.models import DictMixin + + +class FileSystemProperties(object): + """File System properties class. + + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the file system was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar ~azure.storage.filedatalake.LeaseProperties lease: + Stores all the lease information for the file system. + :ivar str public_access: Specifies whether data in the file system may be accessed + publicly and the level of access. + :ivar bool has_immutability_policy: + Represents whether the file system has an immutability policy. 
+ :ivar bool has_legal_hold: + Represents whether the file system has a legal hold. + :ivar dict metadata: A dict with name-value pairs to associate with the + file system as metadata. + :ivar bool deleted: + Whether this file system was deleted. + :ivar str deleted_version: + The version of a deleted file system. + + Returned ``FileSystemProperties`` instances expose these values through a + dictionary interface, for example: ``file_system_props["last_modified"]``. + Additionally, the file system name is available as ``file_system_props["name"]``. + """ + + def __init__(self): + self.name = None + self.last_modified = None + self.etag = None + self.lease = None + self.public_access = None + self.has_immutability_policy = None + self.has_legal_hold = None + self.metadata = None + self.deleted = None + self.deleted_version = None + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.last_modified = generated.properties.last_modified + props.deleted = generated.deleted + props.deleted_version = generated.version + props.etag = generated.properties.etag + props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + props.public_access = PublicAccess._from_generated( # pylint: disable=protected-access + generated.properties.public_access) + props.has_immutability_policy = generated.properties.has_immutability_policy + props.has_legal_hold = generated.properties.has_legal_hold + props.metadata = generated.metadata + return props + + @classmethod + def _convert_from_container_props(cls, container_properties): + container_properties.__class__ = cls + container_properties.public_access = PublicAccess._from_generated( # pylint: disable=protected-access + container_properties.public_access) + container_properties.lease.__class__ = LeaseProperties + return container_properties + + +class FileSystemPropertiesPaged(ContainerPropertiesPaged): + """An Iterable of File System properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A file system name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only file systems whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of file system names to retrieve per + call. + :param str continuation_token: An opaque continuation token. + """ + + def __init__(self, *args, **kwargs): + super(FileSystemPropertiesPaged, self).__init__( + *args, + **kwargs + ) + + @staticmethod + def _build_item(item): + return FileSystemProperties._from_generated(item) # pylint: disable=protected-access + + +class DirectoryProperties(DictMixin): + """ + :ivar str name: name of the directory + :ivar str etag: The ETag contains a value that you can use to perform operations + conditionally. 
+    :ivar bool deleted: whether the current directory is marked as deleted
+    :ivar dict metadata: Name-value pairs associated with the directory as metadata.
+    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
+        Stores all the lease information for the directory.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the directory was modified.
+    :ivar ~datetime.datetime creation_time:
+        Indicates when the directory was created, in UTC.
+    :ivar int remaining_retention_days: The number of days that the directory will be retained
+        before being permanently deleted by the service.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('name')
+        self.etag = kwargs.get('ETag')
+        self.deleted = False
+        self.metadata = kwargs.get('metadata')
+        self.lease = LeaseProperties(**kwargs)
+        self.last_modified = kwargs.get('Last-Modified')
+        self.creation_time = kwargs.get('x-ms-creation-time')
+        self.deleted_time = None
+        self.remaining_retention_days = None
+
+
+class FileProperties(DictMixin):
+    """
+    :ivar str name: name of the file
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar bool deleted: whether the current file is marked as deleted
+    :ivar dict metadata: Name-value pairs associated with the file as metadata.
+    :ivar ~azure.storage.filedatalake.LeaseProperties lease:
+        Stores all the lease information for the file.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the file was modified.
+    :ivar ~datetime.datetime creation_time:
+        Indicates when the file was created, in UTC.
+    :ivar int size: size of the file
+    :ivar int remaining_retention_days: The number of days that the file will be retained
+        before being permanently deleted by the service.
+    :ivar ~azure.storage.filedatalake.ContentSettings content_settings:
+        The content settings of the file.
+    """
+
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('name')
+        self.etag = kwargs.get('ETag')
+        self.deleted = False
+        self.metadata = kwargs.get('metadata')
+        self.lease = LeaseProperties(**kwargs)
+        self.last_modified = kwargs.get('Last-Modified')
+        self.creation_time = kwargs.get('x-ms-creation-time')
+        self.size = kwargs.get('Content-Length')
+        self.deleted_time = None
+        self.expiry_time = kwargs.get("x-ms-expiry-time")
+        self.remaining_retention_days = None
+        self.content_settings = ContentSettings(**kwargs)
+
+
+class PathProperties(object):
+    """Path properties listed by the get_paths api.
+
+    :ivar str name: The full path for a file or directory.
+    :ivar str owner: The owner of the file or directory.
+    :ivar str group: The owning group of the file or directory.
+    :ivar str permissions: The POSIX access permissions for the file
+        owner, the file owning group, and others. Each class may be granted
+        read, write, or execute permission. The sticky bit is also supported.
+        Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+        supported.
+    :ivar datetime last_modified: A datetime object representing the last time the directory/file was modified.
+    :ivar bool is_directory: Whether the path is a directory.
+    :ivar str etag: The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar int content_length: The size of the file, if the path is a file.
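+
+    Illustrative example (a sketch, assuming a ``FileSystemClient`` named
+    ``file_system_client`` whose ``get_paths`` listing yields these objects)::
+
+        for path in file_system_client.get_paths(path="my-directory"):
+            print(path.name, path.is_directory, path.content_length)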
+ """ + + def __init__(self, **kwargs): + self.name = kwargs.pop('name', None) + self.owner = kwargs.get('owner', None) + self.group = kwargs.get('group', None) + self.permissions = kwargs.get('permissions', None) + self.last_modified = kwargs.get('last_modified', None) + self.is_directory = kwargs.get('is_directory', False) + self.etag = kwargs.get('etag', None) + self.content_length = kwargs.get('content_length', None) + + @classmethod + def _from_generated(cls, generated): + path_prop = PathProperties() + path_prop.name = generated.name + path_prop.owner = generated.owner + path_prop.group = generated.group + path_prop.permissions = generated.permissions + path_prop.last_modified = datetime.strptime(generated.last_modified, "%a, %d %b %Y %H:%M:%S %Z") + path_prop.is_directory = bool(generated.is_directory) + path_prop.etag = generated.additional_properties.get('etag') + path_prop.content_length = generated.content_length + return path_prop + + +class LeaseProperties(BlobLeaseProperties): + """DataLake Lease Properties. + + :ivar str status: + The lease status of the file. Possible values: locked|unlocked + :ivar str state: + Lease state of the file. Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a file is leased, specifies whether the lease is of infinite or fixed duration. + """ + + +class ContentSettings(BlobContentSettings): + """The content settings of a file or directory. + + :ivar str content_type: + The content type specified for the file or directory. If no content type was + specified, the default content type is application/octet-stream. + :ivar str content_encoding: + If the content_encoding has previously been set + for the file, that value is stored. + :ivar str content_language: + If the content_language has previously been set + for the file, that value is stored. + :ivar str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the file, that value is stored. + :ivar str cache_control: + If the cache_control has previously been set for + the file, that value is stored. + :ivar bytearray content_md5: + If the content_md5 has been set for the file, this response + header is stored so that the client can check for message content + integrity. + :keyword str content_type: + The content type specified for the file or directory. If no content type was + specified, the default content type is application/octet-stream. + :keyword str content_encoding: + If the content_encoding has previously been set + for the file, that value is stored. + :keyword str content_language: + If the content_language has previously been set + for the file, that value is stored. + :keyword str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the file, that value is stored. + :keyword str cache_control: + If the cache_control has previously been set for + the file, that value is stored. + :keyword bytearray content_md5: + If the content_md5 has been set for the file, this response + header is stored so that the client can check for message content + integrity. 
+ """ + + def __init__( + self, **kwargs): + super(ContentSettings, self).__init__( + **kwargs + ) + + +class AccountSasPermissions(BlobAccountSasPermissions): + def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin + create=False): + super(AccountSasPermissions, self).__init__( + read=read, create=create, write=write, list=list, + delete=delete + ) + + +class FileSystemSasPermissions(object): + """FileSystemSasPermissions class to be used with the + :func:`~azure.storage.filedatalake.generate_file_system_sas` function. + + :param bool read: + Read the content, properties, metadata etc. + :param bool write: + Create or write content, properties, metadata. Lease the file system. + :param bool delete: + Delete the file system. + :param bool list: + List paths in the file system. + :keyword bool move: + Move any file in the directory to a new location. + Note the move operation can optionally be restricted to the child file or directory owner or + the parent directory owner if the saoid parameter is included in the token and the sticky bit is set + on the parent directory. + :keyword bool execute: + Get the status (system defined properties) and ACL of any file in the directory. + If the caller is the owner, set access control on any file in the directory. + :keyword bool manage_ownership: + Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory + within a folder that has the sticky bit set. + :keyword bool manage_access_control: + Allows the user to set permissions and POSIX ACLs on files and directories. + """ + + def __init__(self, read=False, write=False, delete=False, list=False, # pylint: disable=redefined-builtin + **kwargs): + self.read = read + self.write = write + self.delete = delete + self.list = list + self.move = kwargs.pop('move', None) + self.execute = kwargs.pop('execute', None) + self.manage_ownership = kwargs.pop('manage_ownership', None) + self.manage_access_control = kwargs.pop('manage_access_control', None) + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('l' if self.list else '') + + ('m' if self.move else '') + + ('e' if self.execute else '') + + ('o' if self.manage_ownership else '') + + ('p' if self.manage_access_control else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create a FileSystemSasPermissions from a string. + + To specify read, write, or delete permissions you need only to + include the first letter of the word in the string. E.g. For read and + write permissions, you would provide a string "rw". + + :param str permission: The string which dictates the read, add, create, + write, or delete permissions. 
+        :return: A FileSystemSasPermissions object
+        :rtype: ~azure.storage.filedatalake.FileSystemSasPermissions
+        """
+        p_read = 'r' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_list = 'l' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, write=p_write, delete=p_delete,
+                     list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
+
+
+class DirectorySasPermissions(object):
+    """DirectorySasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_directory_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc.
+    :param bool create:
+        Create a new directory.
+    :param bool write:
+        Create or write content, properties, metadata. Lease the directory.
+    :param bool delete:
+        Delete the directory.
+    :keyword bool list:
+        List any files in the directory. Implies Execute.
+    :keyword bool move:
+        Move any file in the directory to a new location.
+        Note the move operation can optionally be restricted to the child file or directory owner or
+        the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+        on the parent directory.
+    :keyword bool execute:
+        Get the status (system defined properties) and ACL of any file in the directory.
+        If the caller is the owner, set access control on any file in the directory.
+    :keyword bool manage_ownership:
+        Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+        within a folder that has the sticky bit set.
+    :keyword bool manage_access_control:
+        Allows the user to set permissions and POSIX ACLs on files and directories.
+    """
+
+    def __init__(self, read=False, create=False, write=False,
+                 delete=False, **kwargs):
+        self.read = read
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self.list = kwargs.pop('list', None)
+        self.move = kwargs.pop('move', None)
+        self.execute = kwargs.pop('execute', None)
+        self.manage_ownership = kwargs.pop('manage_ownership', None)
+        self.manage_access_control = kwargs.pop('manage_access_control', None)
+        self._str = (('r' if self.read else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('l' if self.list else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('o' if self.manage_ownership else '') +
+                     ('p' if self.manage_access_control else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a DirectorySasPermissions from a string.
+
+        To specify read, create, write, or delete permissions you need only to
+        include the first letter of the word in the string. E.g. For read and
+        write permissions, you would provide a string "rw".
+
+        :param str permission: The string which dictates the read, add, create,
+         write, or delete permissions.
+        :return: A DirectorySasPermissions object
+        :rtype: ~azure.storage.filedatalake.DirectorySasPermissions
+        """
+        p_read = 'r' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_list = 'l' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
+                     list=p_list, move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
+
+
+class FileSasPermissions(object):
+    """FileSasPermissions class to be used with the
+    :func:`~azure.storage.filedatalake.generate_file_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata etc. Use the file as
+        the source of a read operation.
+    :param bool create:
+        Write a new file.
+    :param bool write:
+        Create or write content, properties, metadata. Lease the file.
+    :param bool delete:
+        Delete the file.
+    :keyword bool move:
+        Move any file in the directory to a new location.
+        Note the move operation can optionally be restricted to the child file or directory owner or
+        the parent directory owner if the saoid parameter is included in the token and the sticky bit is set
+        on the parent directory.
+    :keyword bool execute:
+        Get the status (system defined properties) and ACL of any file in the directory.
+        If the caller is the owner, set access control on any file in the directory.
+    :keyword bool manage_ownership:
+        Allows the user to set owner, owning group, or act as the owner when renaming or deleting a file or directory
+        within a folder that has the sticky bit set.
+    :keyword bool manage_access_control:
+        Allows the user to set permissions and POSIX ACLs on files and directories.
+    """
+
+    def __init__(self, read=False, create=False, write=False, delete=False, **kwargs):
+        self.read = read
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self.move = kwargs.pop('move', None)
+        self.execute = kwargs.pop('execute', None)
+        self.manage_ownership = kwargs.pop('manage_ownership', None)
+        self.manage_access_control = kwargs.pop('manage_access_control', None)
+        self._str = (('r' if self.read else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('o' if self.manage_ownership else '') +
+                     ('p' if self.manage_access_control else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a FileSasPermissions from a string.
+
+        To specify read, write, or delete permissions you need only to
+        include the first letter of the word in the string. E.g. For read and
+        write permissions, you would provide a string "rw".
+
+        :param str permission: The string which dictates the read, add, create,
+         write, or delete permissions.
+        :return: A FileSasPermissions object
+        :rtype: ~azure.storage.filedatalake.FileSasPermissions
+        """
+        p_read = 'r' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_manage_ownership = 'o' in permission
+        p_manage_access_control = 'p' in permission
+
+        parsed = cls(read=p_read, create=p_create, write=p_write, delete=p_delete,
+                     move=p_move, execute=p_execute, manage_ownership=p_manage_ownership,
+                     manage_access_control=p_manage_access_control)
+        return parsed
+
+
+class AccessPolicy(BlobAccessPolicy):
+    """Access Policy class used by the set and get access policy methods in each service.
+
+    A stored access policy can specify the start time, expiry time, and
+    permissions for the Shared Access Signatures with which it's associated.
+    Depending on how you want to control access to your resource, you can
+    specify all of these parameters within the stored access policy, and omit
+    them from the URL for the Shared Access Signature. Doing so permits you to
+    modify the associated signature's behavior at any time, as well as to revoke
+    it. Or you can specify one or more of the access policy parameters within
+    the stored access policy, and the others on the URL. Finally, you can
+    specify all of the parameters on the URL. In this case, you can use the
+    stored access policy to revoke the signature, but not to modify its behavior.
+
+    Together the Shared Access Signature and the stored access policy must
+    include all fields required to authenticate the signature. If any required
+    fields are missing, the request will fail. Likewise, if a field is specified
+    both in the Shared Access Signature URL and in the stored access policy, the
+    request will fail with status code 400 (Bad Request).
+
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. Azure will always convert values
+        to UTC. If a date is passed in without timezone info, it is assumed to
+        be UTC.
+    :paramtype start: ~datetime.datetime or str
+    """
+
+    def __init__(self, permission=None, expiry=None, **kwargs):
+        super(AccessPolicy, self).__init__(
+            permission=permission, expiry=expiry, start=kwargs.pop('start', None)
+        )
+
+
+class ResourceTypes(BlobResourceTypes):
+    """
+    Specifies the resource types that are accessible with the account SAS.
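+    For example, ``ResourceTypes(service=True, file_system=True)`` grants access to
+    service-level and file-system-level APIs, but not to object-level (file) operations.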
+
+ :param bool service:
+ Access to service-level APIs (e.g. List File Systems)
+ :param bool file_system:
+ Access to file_system-level APIs (e.g., Create/Delete file system,
+ List Directories/Files)
+ :param bool object:
+ Access to object-level APIs for
+ files (e.g. Create File, etc.)
+ """
+
+ def __init__(self, service=False, file_system=False, object=False # pylint: disable=redefined-builtin
+ ):
+ super(ResourceTypes, self).__init__(service=service, container=file_system, object=object)
+
+
+class UserDelegationKey(BlobUserDelegationKey):
+ """
+ Represents a user delegation key, provided to the user by Azure Storage
+ based on their Azure Active Directory access token.
+
+ The fields are saved as simple strings since the user does not have to interact with this object;
+ to generate an identity SAS, the user can simply pass it to the right API.
+
+ :ivar str signed_oid:
+ Object ID of this token.
+ :ivar str signed_tid:
+ Tenant ID of the tenant that issued this token.
+ :ivar str signed_start:
+ The datetime this token becomes valid.
+ :ivar str signed_expiry:
+ The datetime this token expires.
+ :ivar str signed_service:
+ What service this key is valid for.
+ :ivar str signed_version:
+ The version identifier of the REST service that created this token.
+ :ivar str value:
+ The user delegation key.
+ """
+
+ @classmethod
+ def _from_generated(cls, generated):
+ delegation_key = cls()
+ delegation_key.signed_oid = generated.signed_oid
+ delegation_key.signed_tid = generated.signed_tid
+ delegation_key.signed_start = generated.signed_start
+ delegation_key.signed_expiry = generated.signed_expiry
+ delegation_key.signed_service = generated.signed_service
+ delegation_key.signed_version = generated.signed_version
+ delegation_key.value = generated.value
+ return delegation_key
+
+
+class PublicAccess(str, Enum):
+ """
+ Specifies whether data in the file system may be accessed publicly and the level of access.
+ """
+
+ File = 'blob'
+ """
+ Specifies public read access for files. File data within this file system can be read
+ via anonymous request, but file system data is not available. Clients cannot enumerate
+ files within the file system via anonymous request.
+ """
+
+ FileSystem = 'container'
+ """
+ Specifies full public read access for file system and file data. Clients can enumerate
+ files within the file system via anonymous request, but cannot enumerate file systems
+ within the storage account.
+ """
+
+ @classmethod
+ def _from_generated(cls, public_access):
+ if public_access == "blob": # pylint:disable=no-else-return
+ return cls.File
+ elif public_access == "container":
+ return cls.FileSystem
+
+ return None
+
+
+class LocationMode(object):
+ """
+ Specifies the location the request should be sent to. This mode only applies
+ for RA-GRS accounts which allow secondary read access. All other account types
+ must use PRIMARY.
+ """
+
+ PRIMARY = 'primary' #: Requests should be sent to the primary location.
+ SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible.
+
+
+class DelimitedJsonDialect(BlobDelimitedJSON):
+ """Defines the input or output JSON serialization for a datalake query.
+
+ :keyword str delimiter: The line separator character, default value is '\n'
+ """
+
+
+class DelimitedTextDialect(BlobDelimitedTextDialect):
+ """Defines the input or output delimited (CSV) serialization for a datalake query request.
+
+ :keyword str delimiter:
+ Column separator, defaults to ','.
+ :keyword str quotechar:
+ Field quote, defaults to '"'.
+ :keyword str lineterminator:
+ Record separator, defaults to '\n'.
+ :keyword str escapechar:
+ Escape char, defaults to empty.
+ :keyword bool has_header:
+ Whether the data includes headers in the first line. The default value is False, meaning that the
+ data will be returned inclusive of the first line. If set to True, the data will be returned exclusive
+ of the first line.
+ """
+
+
+class ArrowDialect(BlobArrowDialect):
+ """Field of an Arrow schema.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param str type: Required.
+ :keyword str name: The name of the field.
+ :keyword int precision: The precision of the field.
+ :keyword int scale: The scale of the field.
+ """
+
+
+class QuickQueryDialect(str, Enum):
+ """Specifies the quick query input/output dialect."""
+
+ DelimitedText = 'DelimitedTextDialect'
+ DelimitedJson = 'DelimitedJsonDialect'
+ Parquet = 'ParquetDialect'
+
+
+class ArrowType(str, Enum):
+
+ INT64 = "int64"
+ BOOL = "bool"
+ TIMESTAMP_MS = "timestamp[ms]"
+ STRING = "string"
+ DOUBLE = "double"
+ DECIMAL = "decimal"
+
+
+class DataLakeFileQueryError(object):
+ """An error that occurred during a quick query operation.
+
+ :ivar str error:
+ The name of the error.
+ :ivar bool is_fatal:
+ If true, this error prevents further query processing. More result data may be returned,
+ but there is no guarantee that all of the original data will be processed.
+ If false, this error does not prevent further query processing.
+ :ivar str description:
+ A description of the error.
+ :ivar int position:
+ The file offset at which the error occurred.
+ """
+
+ def __init__(self, error=None, is_fatal=False, description=None, position=None):
+ self.error = error
+ self.is_fatal = is_fatal
+ self.description = description
+ self.position = position
+
+
+class AccessControlChangeCounters(DictMixin):
+ """
+ AccessControlChangeCounters contains counts of operations that change Access Control Lists recursively.
+
+ :ivar int directories_successful:
+ Number of directories where Access Control List has been updated successfully.
+ :ivar int files_successful:
+ Number of files where Access Control List has been updated successfully.
+ :ivar int failure_count:
+ Number of paths where Access Control List update has failed.
+ """
+
+ def __init__(self, directories_successful, files_successful, failure_count):
+ self.directories_successful = directories_successful
+ self.files_successful = files_successful
+ self.failure_count = failure_count
+
+
+class AccessControlChangeResult(DictMixin):
+ """
+ AccessControlChangeResult contains result of operations that change Access Control Lists recursively.
+
+ :ivar ~azure.storage.filedatalake.AccessControlChangeCounters counters:
+ Contains counts of paths changed from start of the operation.
+ :ivar str continuation:
+ Optional continuation token.
+ Value is present when operation is split into multiple batches and can be used to resume progress.
+ """
+
+ def __init__(self, counters, continuation):
+ self.counters = counters
+ self.continuation = continuation
+
+
+class AccessControlChangeFailure(DictMixin):
+ """
+ Represents an entry that failed to update Access Control List.
+
+ :ivar str name:
+ Name of the entry.
+ :ivar bool is_directory:
+ Indicates whether the entry is a directory.
+ :ivar str error_message:
+ Indicates the reason why the entry failed to update.
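+
+ A sketch of reading failure entries inside a ``progress_hook`` callback
+ (``changes`` is assumed to be an ``AccessControlChanges`` instance):
+
+ .. code-block:: python
+
+ for failure in changes.batch_failures:
+ print(failure.name, failure.is_directory, failure.error_message)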
+ """ + + def __init__(self, name, is_directory, error_message): + self.name = name + self.is_directory = is_directory + self.error_message = error_message + + +class AccessControlChanges(DictMixin): + """ + AccessControlChanges contains batch and cumulative counts of operations + that change Access Control Lists recursively. + Additionally it exposes path entries that failed to update while these operations progress. + + :ivar ~azure.storage.filedatalake.AccessControlChangeCounters batch_counters: + Contains counts of paths changed within single batch. + :ivar ~azure.storage.filedatalake.AccessControlChangeCounters aggregate_counters: + Contains counts of paths changed from start of the operation. + :ivar list(~azure.storage.filedatalake.AccessControlChangeFailure) batch_failures: + List of path entries that failed to update Access Control List within single batch. + :ivar str continuation: + An opaque continuation token that may be used to resume the operations in case of failures. + """ + + def __init__(self, batch_counters, aggregate_counters, batch_failures, continuation): + self.batch_counters = batch_counters + self.aggregate_counters = aggregate_counters + self.batch_failures = batch_failures + self.continuation = continuation + + +class DeletedPathProperties(DictMixin): + """ + Properties populated for a deleted path. + + :ivar str name: + The name of the file in the path. + :ivar ~datetime.datetime deleted_time: + A datetime object representing the time at which the path was deleted. + :ivar int remaining_retention_days: + The number of days that the path will be retained before being permanently deleted by the service. + :ivar str deletion_id: + The id associated with the deleted path. + """ + def __init__(self, **kwargs): + self.name = kwargs.get('name') + self.deleted_time = None + self.remaining_retention_days = None + self.deletion_id = None + + +class AnalyticsLogging(GenLogging): + """Azure Analytics Logging settings. + + :keyword str version: + The version of Storage Analytics to configure. The default value is 1.0. + :keyword bool delete: + Indicates whether all delete requests should be logged. The default value is `False`. + :keyword bool read: + Indicates whether all read requests should be logged. The default value is `False`. + :keyword bool write: + Indicates whether all write requests should be logged. The default value is `False`. + :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy: + Determines how long the associated data should persist. If not specified the retention + policy will be disabled by default. + """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.delete = kwargs.get('delete', False) + self.read = kwargs.get('read', False) + self.write = kwargs.get('write', False) + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + delete=generated.delete, + read=generated.read, + write=generated.write, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class Metrics(GenMetrics): + """A summary of request statistics grouped by API in hour or minute aggregates. + + :keyword str version: + The version of Storage Analytics to configure. The default value is 1.0. + :keyword bool enabled: + Indicates whether metrics are enabled for the Datalake service. 
+ The default value is `False`. + :keyword bool include_apis: + Indicates whether metrics should generate summary statistics for called API operations. + :keyword ~azure.storage.filedatalake.RetentionPolicy retention_policy: + Determines how long the associated data should persist. If not specified the retention + policy will be disabled by default. + """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.enabled = kwargs.get('enabled', False) + self.include_apis = kwargs.get('include_apis') + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + enabled=generated.enabled, + include_apis=generated.include_apis, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class RetentionPolicy(GenRetentionPolicy): + """The retention policy which determines how long the associated data should + persist. + + :param bool enabled: + Indicates whether a retention policy is enabled for the storage service. + The default value is False. + :param int days: + Indicates the number of days that metrics or logging or + soft-deleted data should be retained. All data older than this value will + be deleted. If enabled=True, the number of days must be specified. + """ + + def __init__(self, enabled=False, days=None): + super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None) + if self.enabled and (self.days is None): + raise ValueError("If policy is enabled, 'days' must be specified.") + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + days=generated.days, + ) + + +class StaticWebsite(GenStaticWebsite): + """The properties that enable an account to host a static website. + + :keyword bool enabled: + Indicates whether this account is hosting a static website. + The default value is `False`. + :keyword str index_document: + The default name of the index page under each directory. + :keyword str error_document404_path: + The absolute path of the custom 404 page. + :keyword str default_index_document_path: + Absolute path of the default index page. + """ + + def __init__(self, **kwargs): + self.enabled = kwargs.get('enabled', False) + if self.enabled: + self.index_document = kwargs.get('index_document') + self.error_document404_path = kwargs.get('error_document404_path') + self.default_index_document_path = kwargs.get('default_index_document_path') + else: + self.index_document = None + self.error_document404_path = None + self.default_index_document_path = None + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + index_document=generated.index_document, + error_document404_path=generated.error_document404_path, + default_index_document_path=generated.default_index_document_path + ) + + +class CorsRule(GenCorsRule): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. Web browsers implement a + security restriction known as same-origin policy that prevents a web page + from calling APIs in a different domain; CORS provides a secure way to + allow one domain (the origin domain) to call APIs in another domain. 
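+
+ A minimal sketch of defining a rule (the origin shown is hypothetical):
+
+ .. code-block:: python
+
+ rule = CorsRule(["https://www.contoso.com"], ["GET", "PUT"],
+ max_age_in_seconds=500)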
+
+ :param list(str) allowed_origins:
+ A list of origin domains that will be allowed via CORS, or "*" to allow
+ all domains. The list must contain at least one entry. Limited to 64
+ origin domains. Each allowed origin can have up to 256 characters.
+ :param list(str) allowed_methods:
+ A list of HTTP methods that are allowed to be executed by the origin.
+ The list must contain at least one entry. For Azure Storage,
+ permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
+ :keyword list(str) allowed_headers:
+ Defaults to an empty list. A list of headers allowed to be part of
+ the cross-origin request. Limited to 64 defined headers and two prefixed
+ headers. Each header can be up to 256 characters.
+ :keyword list(str) exposed_headers:
+ Defaults to an empty list. A list of response headers to expose to CORS
+ clients. Limited to 64 defined headers and two prefixed headers. Each
+ header can be up to 256 characters.
+ :keyword int max_age_in_seconds:
+ The number of seconds that the client/browser should cache a
+ preflight response.
+ """
+
+ def __init__(self, allowed_origins, allowed_methods, **kwargs):
+ self.allowed_origins = ','.join(allowed_origins)
+ self.allowed_methods = ','.join(allowed_methods)
+ self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
+ self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
+ self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
+
+ @classmethod
+ def _from_generated(cls, generated):
+ return cls(
+ [generated.allowed_origins],
+ [generated.allowed_methods],
+ allowed_headers=[generated.allowed_headers],
+ exposed_headers=[generated.exposed_headers],
+ max_age_in_seconds=generated.max_age_in_seconds,
+ )
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_path_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_path_client.py
new file mode 100644
index 0000000..d0bbeb3
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_path_client.py
@@ -0,0 +1,902 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from datetime import datetime
+from typing import Any, Dict, Optional, Union
+
+try:
+ from urllib.parse import urlparse, quote
+except ImportError:
+ from urlparse import urlparse # type: ignore
+ from urllib2 import quote # type: ignore
+
+import six
+
+from azure.core.exceptions import AzureError, HttpResponseError
+from azure.multiapi.storagev2.blob.v2020_06_12 import BlobClient
+from ._data_lake_lease import DataLakeLeaseClient
+from ._deserialize import process_storage_error
+from ._generated import AzureDataLakeStorageRESTAPI
+from ._models import LocationMode, DirectoryProperties, AccessControlChangeResult, AccessControlChanges, \
+ AccessControlChangeCounters, AccessControlChangeFailure
+from ._serialize import convert_dfs_url_to_blob_url, get_mod_conditions, \
+ get_path_http_headers, add_metadata_headers, get_lease_id, get_source_mod_conditions, get_access_conditions, \
+ get_api_version
+from ._shared.base_client import StorageAccountHostsMixin, parse_query
+from ._shared.response_handlers import return_response_headers, return_headers_and_deserialized
+
+_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = (
+ 'The require_encryption flag is set, but encryption is not supported'
+ ' for this method.')
+
+
+class PathClient(StorageAccountHostsMixin):
+ def __init__(
+ self, account_url, # type: str
+ file_system_name, # type: str
+ path_name, # type: str
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+
+ try:
+ if not account_url.lower().startswith('http'):
+ account_url = "https://" + account_url
+ except AttributeError:
+ raise ValueError("Account URL must be a string.")
+ parsed_url = urlparse(account_url.rstrip('/'))
+
+ # remove the preceding/trailing delimiter from the path components
+ file_system_name = file_system_name.strip('/')
+
+ # the name of the root directory is /
+ if path_name != '/':
+ path_name = path_name.strip('/')
+
+ if not (file_system_name and path_name):
+ raise ValueError("Please specify a file system name and file path.")
+ if not parsed_url.netloc:
+ raise ValueError("Invalid URL: {}".format(account_url))
+
+ blob_account_url = convert_dfs_url_to_blob_url(account_url)
+ self._blob_account_url = blob_account_url
+
+ datalake_hosts = kwargs.pop('_hosts', None)
+ blob_hosts = None
+ if datalake_hosts:
+ blob_primary_account_url = convert_dfs_url_to_blob_url(datalake_hosts[LocationMode.PRIMARY])
+ blob_hosts = {LocationMode.PRIMARY: blob_primary_account_url, LocationMode.SECONDARY: ""}
+ self._blob_client = BlobClient(blob_account_url, file_system_name, path_name,
+ credential=credential, _hosts=blob_hosts, **kwargs)
+
+ _, sas_token = parse_query(parsed_url.query)
+ self.file_system_name = file_system_name
+ self.path_name = path_name
+
+ self._query_str, self._raw_credential = self._format_query_string(sas_token, credential)
+
+ super(PathClient, self).__init__(parsed_url, service='dfs', credential=self._raw_credential,
+ _hosts=datalake_hosts, **kwargs)
+ # ADLS doesn't support secondary endpoint, make sure it's empty
+ self._hosts[LocationMode.SECONDARY] = ""
+ api_version = get_api_version(kwargs)
+
+ self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, path=path_name,
+ pipeline=self._pipeline)
+ self._client._config.version = api_version # pylint: disable=protected-access
+
+ self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(
+ self._blob_client.url,
+ file_system=file_system_name,
+ path=path_name,
+ pipeline=self._pipeline)
+ self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access
+
+ def __exit__(self, *args):
+ self._blob_client.close()
+ super(PathClient, self).__exit__(*args)
+
+ def close(self):
+ # type: () -> None
+ """ This method closes the sockets opened by the client.
+ It is unnecessary when the client is used as a context manager.
+ """
+ self._blob_client.close()
+ self.__exit__()
+
+ def _format_url(self, hostname):
+ file_system_name = self.file_system_name
+ if isinstance(file_system_name, six.text_type):
+ file_system_name = file_system_name.encode('UTF-8')
+ return "{}://{}/{}/{}{}".format(
+ self.scheme,
+ hostname,
+ quote(file_system_name),
+ quote(self.path_name, safe='~'),
+ self._query_str)
+
+ def _create_path_options(self, resource_type, content_settings=None, metadata=None, **kwargs):
+ # type: (str, Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_mod_conditions(kwargs)
+
+ path_http_headers = None
+ if content_settings:
+ path_http_headers = get_path_http_headers(content_settings)
+
+ options = {
+ 'resource': resource_type,
+ 'properties': add_metadata_headers(metadata),
+ 'permissions': kwargs.pop('permissions', None),
+ 'umask': kwargs.pop('umask', None),
+ 'path_http_headers': path_http_headers,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ def _create(self, resource_type, content_settings=None, metadata=None, **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Create a directory or file.
+
+ :param resource_type:
+ Required for Create File and Create Directory.
+ The value must be "file" or "directory". Possible values include:
+ 'directory', 'file'
+ :type resource_type: str
+ :param ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :param metadata:
+ Name-value pairs associated with the file/directory as metadata.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword str umask:
+ Optional and only valid if Hierarchical Namespace is enabled for the account.
+ When creating a file or directory and the parent folder does not have a default ACL,
+ the umask restricts the permissions of the file or directory to be created.
+ The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+ The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+ The umask must be specified in 4-digit octal notation (e.g. 0766).
+ :keyword permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ :type permissions: str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Dict[str, Union[str, datetime]]
+ """
+ options = self._create_path_options(
+ resource_type,
+ content_settings=content_settings,
+ metadata=metadata,
+ **kwargs)
+ try:
+ return self._client.path.create(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @staticmethod
+ def _delete_path_options(**kwargs):
+ # type: (**Any) -> Dict[str, Any]
+
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_mod_conditions(kwargs)
+
+ options = {
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'cls': return_response_headers,
+ 'timeout': kwargs.pop('timeout', None)}
+ options.update(kwargs)
+ return options
+
+ def _delete(self, **kwargs):
+ # type: (**Any) -> Dict[str, Union[str, datetime]]
+ """
+ Marks the specified path for deletion.
+
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: response dict (Etag and last modified).
+ """
+ options = self._delete_path_options(**kwargs)
+ try:
+ return self._client.path.delete(**options)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @staticmethod
+ def _set_access_control_options(owner=None, group=None, permissions=None, acl=None, **kwargs):
+ # type: (Optional[str], Optional[str], Optional[str], Optional[str], **Any) -> Dict[str, Any]
+
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ mod_conditions = get_mod_conditions(kwargs)
+
+ options = {
+ 'owner': owner,
+ 'group': group,
+ 'permissions': permissions,
+ 'acl': acl,
+ 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ def set_access_control(self, owner=None, # type: Optional[str]
+ group=None, # type: Optional[str]
+ permissions=None, # type: Optional[str]
+ acl=None, # type: Optional[str]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """
+ Set the owner, group, permissions, or access control list for a path.
+
+ :param owner:
+ Optional. The owner of the file or directory.
+ :type owner: str
+ :param group:
+ Optional. The owning group of the file or directory.
+ :type group: str
+ :param permissions:
+ Optional and only valid if Hierarchical Namespace
+ is enabled for the account. Sets POSIX access permissions for the file
+ owner, the file owning group, and others. Each class may be granted
+ read, write, or execute permission. The sticky bit is also supported.
+ Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+ supported.
+ permissions and acl are mutually exclusive.
+ :type permissions: str
+ :param acl:
+ Sets POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ permissions and acl are mutually exclusive.
+ :type acl: str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: response dict (Etag and last modified).
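+
+ A minimal usage sketch (``client`` is assumed to be an existing file or
+ directory client; the permission string is illustrative):
+
+ .. code-block:: python
+
+ # grant the owner full access and the owning group read/execute
+ client.set_access_control(permissions='rwxr-x---')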
+ """ + if not any([owner, group, permissions, acl]): + raise ValueError("At least one parameter should be set for set_access_control API") + options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs) + try: + return self._client.path.set_access_control(**options) + except HttpResponseError as error: + process_storage_error(error) + + @staticmethod + def _get_access_control_options(upn=None, # type: Optional[bool] + **kwargs): + # type: (...) -> Dict[str, Any] + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_mod_conditions(kwargs) + + options = { + 'action': 'getAccessControl', + 'upn': upn if upn else False, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_response_headers} + options.update(kwargs) + return options + + def get_access_control(self, upn=None, # type: Optional[bool] + **kwargs): + # type: (...) -> Dict[str, Any] + """ + :param upn: Optional. + Valid only when Hierarchical Namespace is + enabled for the account. If "true", the user identity values returned + in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be + transformed from Azure Active Directory Object IDs to User Principal + Names. If "false", the values will be returned as Azure Active + Directory Object IDs. The default value is false. Note that group and + application Object IDs are not translated because they do not have + unique friendly names. + :type upn: bool + :keyword lease: + Required if the file/directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword: response dict. 
+ """ + options = self._get_access_control_options(upn=upn, **kwargs) + try: + return self._client.path.get_properties(**options) + except HttpResponseError as error: + process_storage_error(error) + + @staticmethod + def _set_access_control_recursive_options(mode, acl, **kwargs): + # type: (str, str, **Any) -> Dict[str, Any] + + options = { + 'mode': mode, + 'force_flag': kwargs.pop('continue_on_failure', None), + 'timeout': kwargs.pop('timeout', None), + 'continuation': kwargs.pop('continuation_token', None), + 'max_records': kwargs.pop('batch_size', None), + 'acl': acl, + 'cls': return_headers_and_deserialized} + options.update(kwargs) + return options + + def set_access_control_recursive(self, + acl, + **kwargs): + # type: (str, **Any) -> AccessControlChangeResult + """ + Sets the Access Control on a path and sub-paths. + + :param acl: + Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook: + Callback where the caller can track progress of the operation + as well as collect paths that failed to change Access Control. + :keyword str continuation_token: + Optional continuation token that can be used to resume previously stopped operation. + :keyword int batch_size: + Optional. If data set size exceeds batch size then operation will be split into multiple + requests so that progress can be tracked. Batch size should be between 1 and 2000. + The default when unspecified is 2000. + :keyword int max_batches: + Optional. Defines maximum number of batches that single change Access Control operation can execute. + If maximum is reached before all sub-paths are processed, + then continuation token can be used to resume operation. + Empty value indicates that maximum number of batches in unbound and operation continues till end. + :keyword bool continue_on_failure: + If set to False, the operation will terminate quickly on encountering user errors (4XX). + If True, the operation will ignore user errors and proceed with the operation on other sub-entities of + the directory. + Continuation token will only be returned when continue_on_failure is True in case of user errors. + If not set the default value is False for this. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: A summary of the recursive operations, including the count of successes and failures, + as well as a continuation token in case the operation was terminated prematurely. + :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult` + :raises ~azure.core.exceptions.AzureError: + User can restart the operation using continuation_token field of AzureError if the token is available. + """ + if not acl: + raise ValueError("The Access Control List must be set for this operation") + + progress_hook = kwargs.pop('progress_hook', None) + max_batches = kwargs.pop('max_batches', None) + options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs) + return self._set_access_control_internal(options=options, progress_hook=progress_hook, + max_batches=max_batches) + + def update_access_control_recursive(self, + acl, + **kwargs): + # type: (str, **Any) -> AccessControlChangeResult + """ + Modifies the Access Control on a path and sub-paths. 
+
+ :param acl:
+ Modifies POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, a user or
+ group identifier, and permissions in the format
+ "[scope:][type]:[id]:[permissions]".
+ :type acl: str
+ :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+ Callback where the caller can track progress of the operation
+ as well as collect paths that failed to change Access Control.
+ :keyword str continuation_token:
+ Optional continuation token that can be used to resume a previously stopped operation.
+ :keyword int batch_size:
+ Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+ requests so that progress can be tracked. Batch size should be between 1 and 2000.
+ The default when unspecified is 2000.
+ :keyword int max_batches:
+ Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+ If the maximum is reached before all sub-paths are processed,
+ then the continuation token can be used to resume the operation.
+ An empty value indicates that the maximum number of batches is unbounded and the operation continues
+ until the end.
+ :keyword bool continue_on_failure:
+ If set to False, the operation will terminate quickly on encountering user errors (4XX).
+ If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+ the directory.
+ A continuation token will only be returned when continue_on_failure is True in case of user errors.
+ If not set, the default value is False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: A summary of the recursive operations, including the count of successes and failures,
+ as well as a continuation token in case the operation was terminated prematurely.
+ :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult`
+ :raises ~azure.core.exceptions.AzureError:
+ User can restart the operation using continuation_token field of AzureError if the token is available.
+ """
+ if not acl:
+ raise ValueError("The Access Control List must be set for this operation")
+
+ progress_hook = kwargs.pop('progress_hook', None)
+ max_batches = kwargs.pop('max_batches', None)
+ options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs)
+ return self._set_access_control_internal(options=options, progress_hook=progress_hook,
+ max_batches=max_batches)
+
+ def remove_access_control_recursive(self,
+ acl,
+ **kwargs):
+ # type: (str, **Any) -> AccessControlChangeResult
+ """
+ Removes the Access Control on a path and sub-paths.
+
+ :param acl:
+ Removes POSIX access control rights on files and directories.
+ The value is a comma-separated list of access control entries. Each
+ access control entry (ACE) consists of a scope, a type, and a user or
+ group identifier in the format "[scope:][type]:[id]".
+ :type acl: str
+ :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+ Callback where the caller can track progress of the operation
+ as well as collect paths that failed to change Access Control.
+ :keyword str continuation_token:
+ Optional continuation token that can be used to resume a previously stopped operation.
+ :keyword int batch_size:
+ Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+ requests so that progress can be tracked. Batch size should be between 1 and 2000.
+ The default when unspecified is 2000.
+ :keyword int max_batches:
+ Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+ If the maximum is reached before all sub-paths are processed,
+ then the continuation token can be used to resume the operation.
+ An empty value indicates that the maximum number of batches is unbounded and the operation continues
+ until the end.
+ :keyword bool continue_on_failure:
+ If set to False, the operation will terminate quickly on encountering user errors (4XX).
+ If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+ the directory.
+ A continuation token will only be returned when continue_on_failure is True in case of user errors.
+ If not set, the default value is False.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: A summary of the recursive operations, including the count of successes and failures,
+ as well as a continuation token in case the operation was terminated prematurely.
+ :rtype: :class:`~azure.storage.filedatalake.AccessControlChangeResult`
+ :raises ~azure.core.exceptions.AzureError:
+ User can restart the operation using continuation_token field of AzureError if the token is available.
+ """
+ if not acl:
+ raise ValueError("The Access Control List must be set for this operation")
+
+ progress_hook = kwargs.pop('progress_hook', None)
+ max_batches = kwargs.pop('max_batches', None)
+ options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs)
+ return self._set_access_control_internal(options=options, progress_hook=progress_hook,
+ max_batches=max_batches)
+
+ def _set_access_control_internal(self, options, progress_hook, max_batches=None):
+ try:
+ continue_on_failure = options.get('force_flag')
+ total_directories_successful = 0
+ total_files_success = 0
+ total_failure_count = 0
+ batch_count = 0
+ last_continuation_token = None
+ current_continuation_token = None
+ continue_operation = True
+ while continue_operation:
+ headers, resp = self._client.path.set_access_control_recursive(**options)
+
+ # make a running tally so that we can report the final results
+ total_directories_successful += resp.directories_successful
+ total_files_success += resp.files_successful
+ total_failure_count += resp.failure_count
+ batch_count += 1
+ current_continuation_token = headers['continuation']
+
+ if current_continuation_token is not None:
+ last_continuation_token = current_continuation_token
+
+ if progress_hook is not None:
+ progress_hook(AccessControlChanges(
+ batch_counters=AccessControlChangeCounters(
+ directories_successful=resp.directories_successful,
+ files_successful=resp.files_successful,
+ failure_count=resp.failure_count,
+ ),
+ aggregate_counters=AccessControlChangeCounters(
+ directories_successful=total_directories_successful,
+ files_successful=total_files_success,
+ failure_count=total_failure_count,
+ ),
+ batch_failures=[AccessControlChangeFailure(
+ name=failure.name,
+ is_directory=failure.type == 'DIRECTORY',
+ error_message=failure.error_message) for failure in resp.failed_entries],
+ continuation=last_continuation_token))
+
+ # update the continuation token, if there are more operations that cannot be completed in a single call
+ max_batches_satisfied = (max_batches is not None and batch_count == max_batches)
+ continue_operation = bool(current_continuation_token) and not max_batches_satisfied
+ options['continuation'] = current_continuation_token
+
+ # currently the service stops on any failure, so we should send back the last continuation token
+ # for the user to retry the failed updates
+ # otherwise we should just return what the service gave us
+ return AccessControlChangeResult(counters=AccessControlChangeCounters(
+ directories_successful=total_directories_successful,
+ files_successful=total_files_success,
+ failure_count=total_failure_count),
+ continuation=last_continuation_token
+ if total_failure_count > 0 and not continue_on_failure else current_continuation_token)
+ except HttpResponseError as error:
+ error.continuation_token = last_continuation_token
+ process_storage_error(error)
+ except AzureError as error:
+ error.continuation_token = last_continuation_token
+ raise error
+
+ def _rename_path_options(self, rename_source, content_settings=None, metadata=None, **kwargs):
+ # type: (str, Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any]
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+ if metadata or kwargs.pop('permissions', None) or kwargs.pop('umask', None):
+ raise ValueError("metadata, permissions, and umask are not supported for this operation")
+
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ source_lease_id = get_lease_id(kwargs.pop('source_lease', None))
+ mod_conditions = get_mod_conditions(kwargs)
+ source_mod_conditions = get_source_mod_conditions(kwargs)
+
+ path_http_headers = None
+ if content_settings:
+ path_http_headers = get_path_http_headers(content_settings)
+
+ options = {
+ 'rename_source': rename_source,
+ 'path_http_headers': path_http_headers,
+ 'lease_access_conditions': access_conditions,
+ 'source_lease_id': source_lease_id,
+ 'modified_access_conditions': mod_conditions,
+ 'source_modified_access_conditions': source_mod_conditions,
+ 'timeout': kwargs.pop('timeout', None),
+ 'mode': 'legacy',
+ 'cls': return_response_headers}
+ options.update(kwargs)
+ return options
+
+ def _rename_path(self, rename_source, **kwargs):
+ # type: (str, **Any) -> Dict[str, Any]
+ """
+ Rename a directory or file.
+
+ :param rename_source:
+ The value must have the following format: "/{filesystem}/{path}".
+ :type rename_source: str
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword source_lease:
+ A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + """ + options = self._rename_path_options( + rename_source, + **kwargs) + try: + return self._client.path.create(**options) + except HttpResponseError as error: + process_storage_error(error) + + def _get_path_properties(self, **kwargs): + # type: (**Any) -> Union[FileProperties, DirectoryProperties] + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the file or directory. It does not return the content of the directory or file. + + :keyword lease: + Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: DirectoryProperties or FileProperties + + .. admonition:: Example: + + .. 
literalinclude:: ../tests/test_blob_samples_common.py
+ :start-after: [START get_blob_properties]
+ :end-before: [END get_blob_properties]
+ :language: python
+ :dedent: 8
+ :caption: Getting the properties for a file/directory.
+ """
+ path_properties = self._blob_client.get_blob_properties(**kwargs)
+ return path_properties
+
+ def _exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a path exists and returns False otherwise.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: boolean
+ """
+ return self._blob_client.exists(**kwargs)
+
+ def set_metadata(self, metadata, # type: Dict[str, str]
+ **kwargs):
+ # type: (...) -> Dict[str, Union[str, datetime]]
+ """Sets one or more user-defined name-value pairs for the specified
+ file or directory. Each call to this operation replaces all existing metadata
+ attached to the file or directory. To remove all metadata from the file or directory,
+ call this operation with no metadata dict.
+
+ :param metadata:
+ A dict containing name-value pairs to associate with the file or directory as
+ metadata. Example: {'category':'test'}
+ :type metadata: dict[str, str]
+ :keyword lease:
+ If specified, set_metadata only succeeds if the
+ file/directory's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: file/directory-updated property dict (Etag and last modified).
+ """
+ return self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
+
+ def set_http_headers(self, content_settings=None, # type: Optional[ContentSettings]
+ **kwargs):
+ # type: (...) -> Dict[str, Any]
+ """Sets system properties on the file or directory.
+
+ If one property is set in the content_settings, all properties will be overridden.
+
+ :param ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set file/directory properties.
+ :keyword lease:
+ If specified, set_http_headers only succeeds if the
+ file/directory's lease is active and matches this ID.
+ :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: file/directory-updated property dict (Etag and last modified)
+ :rtype: Dict[str, Any]
+ """
+ return self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
+
+ def acquire_lease(self, lease_duration=-1, # type: Optional[int]
+ lease_id=None, # type: Optional[str]
+ **kwargs):
+ # type: (...) -> DataLakeLeaseClient
+ """
+ Requests a new lease. If the file or directory does not have an active lease,
+ the DataLake service creates a lease on the file/directory and returns a new
+ lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :param str lease_id:
+ Proposed lease ID, in a GUID string format. The DataLake service returns
+ 400 (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A DataLakeLeaseClient object that can be run in a context manager.
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_quick_query_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_quick_query_helper.py
new file mode 100644
index 0000000..ff67d27
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_quick_query_helper.py
@@ -0,0 +1,71 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Union, Iterable, IO  # pylint: disable=unused-import
+
+
+class DataLakeFileQueryReader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to read query results.
+
+    :ivar str name:
+        The name of the blob being queried.
+    :ivar str container:
+        The name of the container where the blob is.
+    :ivar dict response_headers:
+        The response_headers of the quick query request.
+    :ivar bytes record_delimiter:
+        The delimiter used to separate lines, or records, within the data. The `records`
+        method will return these lines via a generator.
+    """
+
+    def __init__(
+        self,
+        blob_query_reader
+    ):
+        self.name = blob_query_reader.name
+        self.file_system = blob_query_reader.container
+        self.response_headers = blob_query_reader.response_headers
+        self.record_delimiter = blob_query_reader.record_delimiter
+        self._bytes_processed = 0
+        self._blob_query_reader = blob_query_reader
+
+    def __len__(self):
+        return len(self._blob_query_reader)
+
+    def readall(self):
+        # type: () -> Union[bytes, str]
+        """Return all query results.
+
+        This operation is blocking until all data is downloaded.
+        If encoding has been configured - this will be used to decode individual
+        records as they are received.
+
+        :rtype: Union[bytes, str]
+        """
+        return self._blob_query_reader.readall()
+
+    def readinto(self, stream):
+        # type: (IO) -> None
+        """Download the query result to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream.
+        :returns: None
+        """
+        self._blob_query_reader.readinto(stream)
+
+    def records(self):
+        # type: () -> Iterable[Union[bytes, str]]
+        """Returns a record generator for the query result.
+
+        Records will be returned line by line.
+        If encoding has been configured - this will be used to decode individual
+        records as they are received.
+
+        :rtype: Iterable[Union[bytes, str]]
+        """
+        return self._blob_query_reader.records()
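A brief consumption sketch for the reader above (illustrative only; it assumes a file client like the one in the earlier sketch, and the query string is the documented sample expression). `query_file` returns this wrapper around the underlying blob quick-query reader, so records can be consumed lazily:

    reader = file_client.query_file("SELECT * from BlobStorage")
    for record in reader.records():
        print(record)  # one line/record at a time, decoded if an encoding was configured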
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_serialize.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_serialize.py
new file mode 100644
index 0000000..88fac96
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_serialize.py
@@ -0,0 +1,111 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from azure.multiapi.storagev2.blob.v2020_06_12._serialize import _get_match_headers  # pylint: disable=protected-access
+from ._shared import encode_base64
+from ._generated.models import ModifiedAccessConditions, PathHTTPHeaders, \
+    SourceModifiedAccessConditions, LeaseAccessConditions
+
+
+_SUPPORTED_API_VERSIONS = [
+    '2019-02-02',
+    '2019-07-07',
+    '2019-10-10',
+    '2019-12-12',
+    '2020-02-10',
+    '2020-04-08',
+    '2020-06-12',
+    '2020-08-04',
+    '2020-10-02'
+]
+
+
+def get_api_version(kwargs):
+    # type: (Dict[str, Any]) -> str
+    api_version = kwargs.get('api_version', None)
+    if api_version and api_version not in _SUPPORTED_API_VERSIONS:
+        versions = '\n'.join(_SUPPORTED_API_VERSIONS)
+        raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions))
+    return api_version or _SUPPORTED_API_VERSIONS[-1]
+
+
+def convert_dfs_url_to_blob_url(dfs_account_url):
+    return dfs_account_url.replace('.dfs.', '.blob.', 1)
+
+
+def convert_datetime_to_rfc1123(date):
+    weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][date.weekday()]
+    month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep",
+             "Oct", "Nov", "Dec"][date.month - 1]
+    return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (weekday, date.day, month,
+                                                    date.year, date.hour, date.minute, date.second)
+
+
+def add_metadata_headers(metadata=None):
+    # type: (Optional[Dict[str, str]]) -> str
+    headers = list()
+    if metadata:
+        for key, value in metadata.items():
+            headers.append(key + '=')
+            headers.append(encode_base64(value))
+            headers.append(',')
+
+    if headers:
+        del headers[-1]
+
+    return ''.join(headers)
+
+
+def get_mod_conditions(kwargs):
+    # type: (Dict[str, Any]) -> ModifiedAccessConditions
+    if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag')
+    return ModifiedAccessConditions(
+        if_modified_since=kwargs.pop('if_modified_since', None),
+        if_unmodified_since=kwargs.pop('if_unmodified_since', None),
+        if_match=if_match or kwargs.pop('if_match', None),
+        if_none_match=if_none_match or kwargs.pop('if_none_match', None)
+    )
+
+
+def get_source_mod_conditions(kwargs):
+    # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
+    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
+    return SourceModifiedAccessConditions(
+        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
+        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
+        source_if_match=if_match or kwargs.pop('source_if_match', None),
+        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
+    )
+
+
+def get_path_http_headers(content_settings):
+    path_headers = PathHTTPHeaders(
+        cache_control=content_settings.cache_control,
+        content_type=content_settings.content_type,
+        content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+        content_encoding=content_settings.content_encoding,
+        content_language=content_settings.content_language,
+        content_disposition=content_settings.content_disposition
+    )
+    return path_headers
+
+
+def get_access_conditions(lease):
+    # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None]
+    try:
+        lease_id = lease.id  # type: ignore
+    except AttributeError:
+        lease_id = lease  # type: ignore
+    return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+
+def get_lease_id(lease):
+    if not lease:
+        return ""
+
+    try:
+        lease_id = lease.id
+    except AttributeError:
+        lease_id = lease
+    return lease_id
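To make the helpers above concrete, here is what a few of them produce (the metadata dict is a made-up example; the outputs follow directly from the code above):

    from datetime import datetime

    convert_datetime_to_rfc1123(datetime(2021, 2, 1, 12, 30, 5))
    # -> 'Mon, 01 Feb 2021 12:30:05 GMT'

    add_metadata_headers({'category': 'test'})
    # -> 'category=dGVzdA==' (values are base64-encoded, pairs comma-separated)

    get_api_version({})                             # -> '2020-10-02' (latest supported)
    get_api_version({'api_version': '2038-01-01'})  # raises ValueError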
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/__init__.py
new file mode 100644
index 0000000..160f882
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/__init__.py
@@ -0,0 +1,56 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import hmac
+
+try:
+    from urllib.parse import quote, unquote
+except ImportError:
+    from urllib2 import quote, unquote  # type: ignore
+
+import six
+
+
+def url_quote(url):
+    return quote(url)
+
+
+def url_unquote(url):
+    return unquote(url)
+
+
+def encode_base64(data):
+    if isinstance(data, six.text_type):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def decode_base64_to_bytes(data):
+    if isinstance(data, six.text_type):
+        data = data.encode('utf-8')
+    return base64.b64decode(data)
+
+
+def decode_base64_to_text(data):
+    decoded_bytes = decode_base64_to_bytes(data)
+    return decoded_bytes.decode('utf-8')
+
+
+def sign_string(key, string_to_sign, key_is_base64=True):
+    if key_is_base64:
+        key = decode_base64_to_bytes(key)
+    else:
+        if isinstance(key, six.text_type):
+            key = key.encode('utf-8')
+    if isinstance(string_to_sign, six.text_type):
+        string_to_sign = string_to_sign.encode('utf-8')
+    signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)
+    digest = signed_hmac_sha256.digest()
+    encoded_digest = encode_base64(digest)
+    return encoded_digest
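A minimal sketch of what `sign_string` computes (the key and payload here are made up): the account key is base64-decoded, the payload is HMAC-SHA256-signed, and the digest is returned base64-encoded:

    import base64, hashlib, hmac

    key = base64.b64encode(b'secret').decode()   # storage account keys are base64 strings
    expected = base64.b64encode(
        hmac.HMAC(b'secret', b'GET\n', hashlib.sha256).digest()).decode('utf-8')
    assert sign_string(key, 'GET\n') == expected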
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/authentication.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/authentication.py
new file mode 100644
index 0000000..d04c1e4
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/authentication.py
@@ -0,0 +1,142 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import logging
+import sys
+
+try:
+    from urllib.parse import urlparse, unquote
+except ImportError:
+    from urlparse import urlparse  # type: ignore
+    from urllib2 import unquote  # type: ignore
+
+try:
+    from yarl import URL
+except ImportError:
+    pass
+
+try:
+    from azure.core.pipeline.transport import AioHttpTransport
+except ImportError:
+    AioHttpTransport = None
+
+from azure.core.exceptions import ClientAuthenticationError
+from azure.core.pipeline.policies import SansIOHTTPPolicy
+
+from . import sign_string
+
+
+logger = logging.getLogger(__name__)
+
+
+# wraps a given exception with the desired exception type
+def _wrap_exception(ex, desired_type):
+    msg = ""
+    if ex.args:
+        msg = ex.args[0]
+    if sys.version_info >= (3,):
+        # Automatic chaining in Python 3 means we keep the trace
+        return desired_type(msg)
+    # There isn't a good solution in 2 for keeping the stack trace
+    # in general, or that will not result in an error in 3
+    # However, we can keep the previous error type and message
+    # TODO: In the future we will log the trace
+    return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
+
+
+class AzureSigningError(ClientAuthenticationError):
+    """
+    Represents a fatal error when attempting to sign a request.
+    In general, the cause of this exception is user error. For example, the given account key is not valid.
+    Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
+    """
+
+
+# pylint: disable=no-self-use
+class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
+
+    def __init__(self, account_name, account_key):
+        self.account_name = account_name
+        self.account_key = account_key
+        super(SharedKeyCredentialPolicy, self).__init__()
+
+    @staticmethod
+    def _get_headers(request, headers_to_sign):
+        headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
+        if 'content-length' in headers and headers['content-length'] == '0':
+            del headers['content-length']
+        return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
+
+    @staticmethod
+    def _get_verb(request):
+        return request.http_request.method + '\n'
+
+    def _get_canonicalized_resource(self, request):
+        uri_path = urlparse(request.http_request.url).path
+        try:
+            if isinstance(request.context.transport, AioHttpTransport) or \
+                    isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \
+                    isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None),
+                               AioHttpTransport):
+                uri_path = URL(uri_path)
+                return '/' + self.account_name + str(uri_path)
+        except TypeError:
+            pass
+        return '/' + self.account_name + uri_path
+
+    @staticmethod
+    def _get_canonicalized_headers(request):
+        string_to_sign = ''
+        x_ms_headers = []
+        for name, value in request.http_request.headers.items():
+            if name.startswith('x-ms-'):
+                x_ms_headers.append((name.lower(), value))
+        x_ms_headers.sort()
+        for name, value in x_ms_headers:
+            if value is not None:
+                string_to_sign += ''.join([name, ':', value, '\n'])
+        return string_to_sign
+
+    @staticmethod
+    def _get_canonicalized_resource_query(request):
+        sorted_queries = list(request.http_request.query.items())
+        sorted_queries.sort()
+
+        string_to_sign = ''
+        for name, value in sorted_queries:
+            if value is not None:
+                string_to_sign += '\n' + name.lower() + ':' + unquote(value)
+
+        return string_to_sign
+
+    def _add_authorization_header(self, request, string_to_sign):
+        try:
+            signature = sign_string(self.account_key, string_to_sign)
+            auth_string = 'SharedKey ' + self.account_name + ':' + signature
+            request.http_request.headers['Authorization'] = auth_string
+        except Exception as ex:
+            # Wrap any error that occurred as a signing error.
+            # Doing so helps clarify and locate the source of the problem.
+            raise _wrap_exception(ex, AzureSigningError)
+
+    def on_request(self, request):
+        string_to_sign = \
+            self._get_verb(request) + \
+            self._get_headers(
                request,
                [
                    'content-encoding', 'content-language', 'content-length',
'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' + ] + ) + \ + self._get_canonicalized_headers(request) + \ + self._get_canonicalized_resource(request) + \ + self._get_canonicalized_resource_query(request) + + self._add_authorization_header(request, string_to_sign) + #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client.py new file mode 100644 index 0000000..5e524b2 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client.py @@ -0,0 +1,459 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import logging +import uuid +from typing import ( # pylint: disable=unused-import + Optional, + Any, + Tuple, +) + +try: + from urllib.parse import parse_qs, quote +except ImportError: + from urlparse import parse_qs # type: ignore + from urllib2 import quote # type: ignore + +import six + +from azure.core.configuration import Configuration +from azure.core.credentials import AzureSasCredential +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import RequestsTransport, HttpTransport +from azure.core.pipeline.policies import ( + RedirectPolicy, + ContentDecodePolicy, + BearerTokenCredentialPolicy, + ProxyPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + UserAgentPolicy, + AzureSasCredentialPolicy +) + +from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .models import LocationMode +from .authentication import SharedKeyCredentialPolicy +from .shared_access_signature import QueryStringConstants +from .request_handlers import serialize_batch_body, _get_batch_request_delimiter +from .policies import ( + StorageHeadersPolicy, + StorageContentValidation, + StorageRequestHook, + StorageResponseHook, + StorageLoggingPolicy, + StorageHosts, + QueueMessagePolicy, + ExponentialRetry, +) +from .._version import VERSION +from .response_handlers import process_storage_error, PartialBatchErrorException + + +_LOGGER = logging.getLogger(__name__) +_SERVICE_PARAMS = { + "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, + "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, + "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, + "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, +} + +class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + parsed_url, # type: Any + service, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None + self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) + self._hosts = kwargs.get("_hosts") + self.scheme = parsed_url.scheme + + if service not in ["blob", "queue", "file-share", "dfs"]: + raise ValueError("Invalid service: {}".format(service)) + service_name = service.split('-')[0] + account = parsed_url.netloc.split(".{}.core.".format(service_name)) + + self.account_name = account[0] if len(account) > 1 else None + if not self.account_name and parsed_url.netloc.startswith("localhost") \ + or parsed_url.netloc.startswith("127.0.0.1"): + self.account_name = parsed_url.path.strip("/") + + self.credential = _format_shared_key_credential(self.account_name, credential) + if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): + raise ValueError("Token credential is only supported with HTTPS.") + + secondary_hostname = None + if hasattr(self.credential, "account_name"): + self.account_name = self.credential.account_name + secondary_hostname = "{}-secondary.{}.{}".format( + self.credential.account_name, service_name, SERVICE_HOST_BASE) + + if not self._hosts: + if len(account) > 1: + secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") + if kwargs.get("secondary_hostname"): + secondary_hostname = kwargs["secondary_hostname"] + primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') + self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} + + self.require_encryption = kwargs.get("require_encryption", False) + self.key_encryption_key = kwargs.get("key_encryption_key") + self.key_resolver_function = kwargs.get("key_resolver_function") + self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) + + def __enter__(self): + self._client.__enter__() + return self + + def __exit__(self, *args): + self._client.__exit__(*args) + + def close(self): + """ This method is to close the sockets opened by the client. + It need not be used when using with a context manager. + """ + self._client.close() + + @property + def url(self): + """The full endpoint URL to this entity, including SAS token if used. + + This could be either the primary endpoint, + or the secondary endpoint depending on the current :func:`location_mode`. + """ + return self._format_url(self._hosts[self._location_mode]) + + @property + def primary_endpoint(self): + """The full primary endpoint URL. + + :type: str + """ + return self._format_url(self._hosts[LocationMode.PRIMARY]) + + @property + def primary_hostname(self): + """The hostname of the primary endpoint. + + :type: str + """ + return self._hosts[LocationMode.PRIMARY] + + @property + def secondary_endpoint(self): + """The full secondary endpoint URL if configured. + + If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. + + :type: str + :raise ValueError: + """ + if not self._hosts[LocationMode.SECONDARY]: + raise ValueError("No secondary host configured.") + return self._format_url(self._hosts[LocationMode.SECONDARY]) + + @property + def secondary_hostname(self): + """The hostname of the secondary endpoint. + + If not available this will be None. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. 
+ + :type: str or None + """ + return self._hosts[LocationMode.SECONDARY] + + @property + def location_mode(self): + """The location mode that the client is currently using. + + By default this will be "primary". Options include "primary" and "secondary". + + :type: str + """ + + return self._location_mode + + @location_mode.setter + def location_mode(self, value): + if self._hosts.get(value): + self._location_mode = value + self._client._config.url = self.url # pylint: disable=protected-access + else: + raise ValueError("No host URL for location mode: {}".format(value)) + + @property + def api_version(self): + """The version of the Storage API used for requests. + + :type: str + """ + return self._client._config.version # pylint: disable=protected-access + + def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None): + query_str = "?" + if snapshot: + query_str += "snapshot={}&".format(self.snapshot) + if share_snapshot: + query_str += "sharesnapshot={}&".format(self.snapshot) + if sas_token and isinstance(credential, AzureSasCredential): + raise ValueError( + "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.") + if sas_token and not credential: + query_str += sas_token + elif is_credential_sastoken(credential): + query_str += credential.lstrip("?") + credential = None + return query_str.rstrip("?&"), credential + + def _create_pipeline(self, credential, **kwargs): + # type: (Any, **Any) -> Tuple[Configuration, Pipeline] + self._credential_policy = None + if hasattr(credential, "get_token"): + self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) + elif isinstance(credential, SharedKeyCredentialPolicy): + self._credential_policy = credential + elif isinstance(credential, AzureSasCredential): + self._credential_policy = AzureSasCredentialPolicy(credential) + elif credential is not None: + raise TypeError("Unsupported credential: {}".format(credential)) + + config = kwargs.get("_configuration") or create_configuration(**kwargs) + if kwargs.get("_pipeline"): + return config, kwargs["_pipeline"] + config.transport = kwargs.get("transport") # type: ignore + kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) + kwargs.setdefault("read_timeout", READ_TIMEOUT) + if not config.transport: + config.transport = RequestsTransport(**kwargs) + policies = [ + QueueMessagePolicy(), + config.proxy_policy, + config.user_agent_policy, + StorageContentValidation(), + ContentDecodePolicy(response_encoding="utf-8"), + RedirectPolicy(**kwargs), + StorageHosts(hosts=self._hosts, **kwargs), + config.retry_policy, + config.headers_policy, + StorageRequestHook(**kwargs), + self._credential_policy, + config.logging_policy, + StorageResponseHook(**kwargs), + DistributedTracingPolicy(**kwargs), + HttpLoggingPolicy(**kwargs) + ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") + return config, Pipeline(config.transport, policies=policies) + + def _batch_send( + self, + *reqs, # type: HttpRequest + **kwargs + ): + """Given a series of request, do a Storage batch call. 
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + batch_id = str(uuid.uuid1()) + + request = self._client._client.post( # pylint: disable=protected-access + url='{}://{}/{}?{}comp=batch{}{}'.format( + self.scheme, + self.primary_hostname, + kwargs.pop('path', ""), + kwargs.pop('restype', ""), + kwargs.pop('sas', ""), + kwargs.pop('timeout', "") + ), + headers={ + 'x-ms-version': self.api_version, + "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) + } + ) + + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + + request.set_multipart_mixed( + *reqs, + policies=policies, + enforce_https=False + ) + + Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access + body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) + request.set_bytes_body(body) + + temp = request.multipart_mixed_info + request.multipart_mixed_info = None + pipeline_response = self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + request.multipart_mixed_info = temp + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() + if raise_on_any_failure: + parts = list(response.parts()) + if any(p for p in parts if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts + ) + raise error + return iter(parts) + return parts + except HttpResponseError as error: + process_storage_error(error) + +class TransportWrapper(HttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. 
+ """ + def __init__(self, transport): + self._transport = transport + + def send(self, request, **kwargs): + return self._transport.send(request, **kwargs) + + def open(self): + pass + + def close(self): + pass + + def __enter__(self): + pass + + def __exit__(self, *args): # pylint: disable=arguments-differ + pass + + +def _format_shared_key_credential(account_name, credential): + if isinstance(credential, six.string_types): + if not account_name: + raise ValueError("Unable to determine account name for shared key credential.") + credential = {"account_name": account_name, "account_key": credential} + if isinstance(credential, dict): + if "account_name" not in credential: + raise ValueError("Shared key credential missing 'account_name") + if "account_key" not in credential: + raise ValueError("Shared key credential missing 'account_key") + return SharedKeyCredentialPolicy(**credential) + return credential + + +def parse_connection_str(conn_str, credential, service): + conn_str = conn_str.rstrip(";") + conn_settings = [s.split("=", 1) for s in conn_str.split(";")] + if any(len(tup) != 2 for tup in conn_settings): + raise ValueError("Connection string is either blank or malformed.") + conn_settings = dict((key.upper(), val) for key, val in conn_settings) + endpoints = _SERVICE_PARAMS[service] + primary = None + secondary = None + if not credential: + try: + credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} + except KeyError: + credential = conn_settings.get("SHAREDACCESSSIGNATURE") + if endpoints["primary"] in conn_settings: + primary = conn_settings[endpoints["primary"]] + if endpoints["secondary"] in conn_settings: + secondary = conn_settings[endpoints["secondary"]] + else: + if endpoints["secondary"] in conn_settings: + raise ValueError("Connection string specifies only secondary endpoint.") + try: + primary = "{}://{}.{}.{}".format( + conn_settings["DEFAULTENDPOINTSPROTOCOL"], + conn_settings["ACCOUNTNAME"], + service, + conn_settings["ENDPOINTSUFFIX"], + ) + secondary = "{}-secondary.{}.{}".format( + conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] + ) + except KeyError: + pass + + if not primary: + try: + primary = "https://{}.{}.{}".format( + conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) + ) + except KeyError: + raise ValueError("Connection string missing required connection details.") + return primary, secondary, credential + + +def create_configuration(**kwargs): + # type: (**Any) -> Configuration + config = Configuration(**kwargs) + config.headers_policy = StorageHeadersPolicy(**kwargs) + config.user_agent_policy = UserAgentPolicy( + sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) + config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.logging_policy = StorageLoggingPolicy(**kwargs) + config.proxy_policy = ProxyPolicy(**kwargs) + + # Storage settings + config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) + config.copy_polling_interval = 15 + + # Block blob uploads + config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) + config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) + config.use_byte_buffer = kwargs.get("use_byte_buffer", False) + + # Page blob uploads + config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) + + # Datalake file uploads + 
config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) + + # Blob downloads + config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) + config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) + + # File uploads + config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) + return config + + +def parse_query(query_str): + sas_values = QueryStringConstants.to_list() + parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} + sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] + sas_token = None + if sas_params: + sas_token = "&".join(sas_params) + + snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") + return snapshot, sas_token + + +def is_credential_sastoken(credential): + if not credential or not isinstance(credential, six.string_types): + return False + + sas_values = QueryStringConstants.to_list() + parsed_query = parse_qs(credential.lstrip("?")) + if parsed_query and all([k in sas_values for k in parsed_query.keys()]): + return True + return False diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client_async.py new file mode 100644 index 0000000..091c350 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/base_client_async.py @@ -0,0 +1,183 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging + +from azure.core.credentials import AzureSasCredential +from azure.core.pipeline import AsyncPipeline +from azure.core.async_paging import AsyncList +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline.policies import ( + ContentDecodePolicy, + AsyncBearerTokenCredentialPolicy, + AsyncRedirectPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + AzureSasCredentialPolicy, +) +from azure.core.pipeline.transport import AsyncHttpTransport + +from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .authentication import SharedKeyCredentialPolicy +from .base_client import create_configuration +from .policies import ( + StorageContentValidation, + StorageRequestHook, + StorageHosts, + StorageHeadersPolicy, + QueueMessagePolicy +) +from .policies_async import AsyncStorageResponseHook + +from .response_handlers import process_storage_error, PartialBatchErrorException + +if TYPE_CHECKING: + from azure.core.pipeline import Pipeline + from azure.core.pipeline.transport import HttpRequest + from azure.core.configuration import Configuration +_LOGGER = logging.getLogger(__name__) + + +class AsyncStorageAccountHostsMixin(object): + + def __enter__(self): + raise TypeError("Async client only supports 'async with'.") + + def __exit__(self, *args): + pass + + async def __aenter__(self): + await self._client.__aenter__() + return self + + async def __aexit__(self, *args): + await self._client.__aexit__(*args) + + async def close(self): + """ This method is to close the sockets opened by the client. 
+ It need not be used when using with a context manager. + """ + await self._client.close() + + def _create_pipeline(self, credential, **kwargs): + # type: (Any, **Any) -> Tuple[Configuration, Pipeline] + self._credential_policy = None + if hasattr(credential, 'get_token'): + self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) + elif isinstance(credential, SharedKeyCredentialPolicy): + self._credential_policy = credential + elif isinstance(credential, AzureSasCredential): + self._credential_policy = AzureSasCredentialPolicy(credential) + elif credential is not None: + raise TypeError("Unsupported credential: {}".format(credential)) + config = kwargs.get('_configuration') or create_configuration(**kwargs) + if kwargs.get('_pipeline'): + return config, kwargs['_pipeline'] + config.transport = kwargs.get('transport') # type: ignore + kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) + kwargs.setdefault("read_timeout", READ_TIMEOUT) + if not config.transport: + try: + from azure.core.pipeline.transport import AioHttpTransport + except ImportError: + raise ImportError("Unable to create async transport. Please check aiohttp is installed.") + config.transport = AioHttpTransport(**kwargs) + policies = [ + QueueMessagePolicy(), + config.headers_policy, + config.proxy_policy, + config.user_agent_policy, + StorageContentValidation(), + StorageRequestHook(**kwargs), + self._credential_policy, + ContentDecodePolicy(response_encoding="utf-8"), + AsyncRedirectPolicy(**kwargs), + StorageHosts(hosts=self._hosts, **kwargs), # type: ignore + config.retry_policy, + config.logging_policy, + AsyncStorageResponseHook(**kwargs), + DistributedTracingPolicy(**kwargs), + HttpLoggingPolicy(**kwargs), + ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") + return config, AsyncPipeline(config.transport, policies=policies) + + async def _batch_send( + self, *reqs: 'HttpRequest', + **kwargs + ): + """Given a series of request, do a Storage batch call. + """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + request = self._client._client.post( # pylint: disable=protected-access + url='https://{}/?comp=batch'.format(self.primary_hostname), + headers={ + 'x-ms-version': self.api_version + } + ) + + request.set_multipart_mixed( + *reqs, + policies=[ + StorageHeadersPolicy(), + self._credential_policy + ], + enforce_https=False + ) + + pipeline_response = await self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() # Return an AsyncIterator + if raise_on_any_failure: + parts_list = [] + async for part in parts: + parts_list.append(part) + if any(p for p in parts_list if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts_list + ) + raise error + return AsyncList(parts_list) + return parts + except HttpResponseError as error: + process_storage_error(error) + + +class AsyncTransportWrapper(AsyncHttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. 
+ """ + def __init__(self, async_transport): + self._transport = async_transport + + async def send(self, request, **kwargs): + return await self._transport.send(request, **kwargs) + + async def open(self): + pass + + async def close(self): + pass + + async def __aenter__(self): + pass + + async def __aexit__(self, *args): # pylint: disable=arguments-differ + pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/constants.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/constants.py new file mode 100644 index 0000000..a50e8b5 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/constants.py @@ -0,0 +1,27 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys + +from .._generated import AzureDataLakeStorageRESTAPI + + +X_MS_VERSION = AzureDataLakeStorageRESTAPI(url="get_api_version")._config.version # pylint: disable=protected-access + +# Socket timeout in seconds +CONNECTION_TIMEOUT = 20 +READ_TIMEOUT = 20 + +# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) +# The socket timeout is now the maximum total duration to send all data. +if sys.version_info >= (3, 5): + # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds + # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) + READ_TIMEOUT = 2000 + +STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" + +SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/encryption.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/encryption.py new file mode 100644 index 0000000..62607cc --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/encryption.py @@ -0,0 +1,542 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os +from os import urandom +from json import ( + dumps, + loads, +) +from collections import OrderedDict + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import CBC +from cryptography.hazmat.primitives.padding import PKCS7 + +from azure.core.exceptions import HttpResponseError + +from .._version import VERSION +from . import encode_base64, decode_base64_to_bytes + + +_ENCRYPTION_PROTOCOL_V1 = '1.0' +_ERROR_OBJECT_INVALID = \ + '{0} does not define a complete interface. Value of {1} is either missing or invalid.' + + +def _validate_not_none(param_name, param): + if param is None: + raise ValueError('{0} should not be None.'.format(param_name)) + + +def _validate_key_encryption_key_wrap(kek): + # Note that None is not callable and so will fail the second clause of each check. 
+ if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) + if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) + + +class _EncryptionAlgorithm(object): + ''' + Specifies which client encryption algorithm is used. + ''' + AES_CBC_256 = 'AES_CBC_256' + + +class _WrappedContentKey: + ''' + Represents the envelope key details stored on the service. + ''' + + def __init__(self, algorithm, encrypted_key, key_id): + ''' + :param str algorithm: + The algorithm used for wrapping. + :param bytes encrypted_key: + The encrypted content-encryption-key. + :param str key_id: + The key-encryption-key identifier string. + ''' + + _validate_not_none('algorithm', algorithm) + _validate_not_none('encrypted_key', encrypted_key) + _validate_not_none('key_id', key_id) + + self.algorithm = algorithm + self.encrypted_key = encrypted_key + self.key_id = key_id + + +class _EncryptionAgent: + ''' + Represents the encryption agent stored on the service. + It consists of the encryption protocol version and encryption algorithm used. + ''' + + def __init__(self, encryption_algorithm, protocol): + ''' + :param _EncryptionAlgorithm encryption_algorithm: + The algorithm used for encrypting the message contents. + :param str protocol: + The protocol version used for encryption. + ''' + + _validate_not_none('encryption_algorithm', encryption_algorithm) + _validate_not_none('protocol', protocol) + + self.encryption_algorithm = str(encryption_algorithm) + self.protocol = protocol + + +class _EncryptionData: + ''' + Represents the encryption data that is stored on the service. + ''' + + def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, + key_wrapping_metadata): + ''' + :param bytes content_encryption_IV: + The content encryption initialization vector. + :param _EncryptionAgent encryption_agent: + The encryption agent. + :param _WrappedContentKey wrapped_content_key: + An object that stores the wrapping algorithm, the key identifier, + and the encrypted key bytes. + :param dict key_wrapping_metadata: + A dict containing metadata related to the key wrapping. + ''' + + _validate_not_none('content_encryption_IV', content_encryption_IV) + _validate_not_none('encryption_agent', encryption_agent) + _validate_not_none('wrapped_content_key', wrapped_content_key) + + self.content_encryption_IV = content_encryption_IV + self.encryption_agent = encryption_agent + self.wrapped_content_key = wrapped_content_key + self.key_wrapping_metadata = key_wrapping_metadata + + +def _generate_encryption_data_dict(kek, cek, iv): + ''' + Generates and returns the encryption metadata as a dict. + + :param object kek: The key encryption key. See calling functions for more information. + :param bytes cek: The content encryption key. + :param bytes iv: The initialization vector. + :return: A dict containing all the encryption metadata. + :rtype: dict + ''' + # Encrypt the cek. + wrapped_cek = kek.wrap_key(cek) + + # Build the encryption_data dict. + # Use OrderedDict to comply with Java's ordering requirement. 
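+    # Shape of the resulting dict (serialized to JSON and stored alongside the data):
+    #   {'WrappedContentKey': {'KeyId': ..., 'EncryptedKey': <base64>, 'Algorithm': ...},
+    #    'EncryptionAgent': {'Protocol': '1.0', 'EncryptionAlgorithm': 'AES_CBC_256'},
+    #    'ContentEncryptionIV': <base64>,
+    #    'KeyWrappingMetadata': {'EncryptionLibrary': 'Python <version>'}}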
+ wrapped_content_key = OrderedDict() + wrapped_content_key['KeyId'] = kek.get_kid() + wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) + wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() + + encryption_agent = OrderedDict() + encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 + encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 + + encryption_data_dict = OrderedDict() + encryption_data_dict['WrappedContentKey'] = wrapped_content_key + encryption_data_dict['EncryptionAgent'] = encryption_agent + encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) + encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} + + return encryption_data_dict + + +def _dict_to_encryption_data(encryption_data_dict): + ''' + Converts the specified dictionary to an EncryptionData object for + eventual use in decryption. + + :param dict encryption_data_dict: + The dictionary containing the encryption data. + :return: an _EncryptionData object built from the dictionary. + :rtype: _EncryptionData + ''' + try: + if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: + raise ValueError("Unsupported encryption version.") + except KeyError: + raise ValueError("Unsupported encryption version.") + wrapped_content_key = encryption_data_dict['WrappedContentKey'] + wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], + decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), + wrapped_content_key['KeyId']) + + encryption_agent = encryption_data_dict['EncryptionAgent'] + encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], + encryption_agent['Protocol']) + + if 'KeyWrappingMetadata' in encryption_data_dict: + key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] + else: + key_wrapping_metadata = None + + encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), + encryption_agent, + wrapped_content_key, + key_wrapping_metadata) + + return encryption_data + + +def _generate_AES_CBC_cipher(cek, iv): + ''' + Generates and returns an encryption cipher for AES CBC using the given cek and iv. + + :param bytes[] cek: The content encryption key for the cipher. + :param bytes[] iv: The initialization vector for the cipher. + :return: A cipher for encrypting in AES256 CBC. + :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher + ''' + + backend = default_backend() + algorithm = AES(cek) + mode = CBC(iv) + return Cipher(algorithm, mode, backend) + + +def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): + ''' + Extracts and returns the content_encryption_key stored in the encryption_data object + and performs necessary validation on all parameters. + :param _EncryptionData encryption_data: + The encryption metadata of the retrieved value. + :param obj key_encryption_key: + The key_encryption_key used to unwrap the cek. Please refer to high-level service object + instance variables for more details. + :param func key_resolver: + A function used that, given a key_id, will return a key_encryption_key. Please refer + to high-level service object instance variables for more details. + :return: the content_encryption_key stored in the encryption_data object. 
+    :rtype: bytes[]
+    '''
+
+    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
+    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
+
+    if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol:
+        raise ValueError('Encryption version is not supported.')
+
+    content_encryption_key = None
+
+    # If the resolver exists, give priority to the key it finds.
+    if key_resolver is not None:
+        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
+
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+    if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
+    if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
+        raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
+    # Will throw an exception if the specified algorithm is not supported.
+    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+                                                           encryption_data.wrapped_content_key.algorithm)
+    _validate_not_none('content_encryption_key', content_encryption_key)
+
+    return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+    '''
+    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
+    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+    Returns the original plaintext.
+
+    :param str message:
+        The ciphertext to be decrypted.
+    :param _EncryptionData encryption_data:
+        The metadata associated with this ciphertext.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted plaintext.
+    :rtype: str
+    '''
+    _validate_not_none('message', message)
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+    # decrypt data
+    decrypted_data = message
+    decryptor = cipher.decryptor()
+    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
+
+    # unpad data
+    unpadder = PKCS7(128).unpadder()
+    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+    return decrypted_data
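+# Usage sketch (illustrative only; the toy KEK below is NOT a secure implementation,
+# it merely satisfies the duck-typed interface so these helpers can be exercised):
+#
+#     class ToyKEK:
+#         def wrap_key(self, key): return key
+#         def unwrap_key(self, key, algorithm): return key
+#         def get_kid(self): return 'toy-kid'
+#         def get_key_wrap_algorithm(self): return 'none'
+#
+#     kek = ToyKEK()
+#     metadata_json, ciphertext = encrypt_blob(b'hello world', kek)   # defined below
+#     encryption_data = _dict_to_encryption_data(loads(metadata_json))
+#     assert _decrypt_message(ciphertext, encryption_data, kek) == b'hello world'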
+def encrypt_blob(blob, key_encryption_key):
+    '''
+    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encryption metadata. This method should
+    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
+    is done as a part of the upload_data_chunks method.
+
+    :param bytes blob:
+        The blob to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+    :rtype: (str, bytes)
+    '''
+
+    _validate_not_none('blob', blob)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = urandom(32)
+    initialization_vector = urandom(16)
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(blob) + padder.finalize()
+
+    # Encrypt the data.
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+                                                     initialization_vector)
+    encryption_data['EncryptionMode'] = 'FullBlob'
+
+    return dumps(encryption_data), encrypted_data
+
+
+def generate_blob_encryption_data(key_encryption_key):
+    '''
+    Generates the encryption_metadata for the blob.
+
+    :param object key_encryption_key:
+        The key-encryption-key used to wrap the cek associated with this blob.
+    :return: A tuple containing the cek and iv for this blob as well as the
+        serialized encryption metadata for the blob.
+    :rtype: (bytes, bytes, str)
+    '''
+    encryption_data = None
+    content_encryption_key = None
+    initialization_vector = None
+    if key_encryption_key:
+        _validate_key_encryption_key_wrap(key_encryption_key)
+        content_encryption_key = urandom(32)
+        initialization_vector = urandom(16)
+        encryption_data = _generate_encryption_data_dict(key_encryption_key,
+                                                         content_encryption_key,
+                                                         initialization_vector)
+        encryption_data['EncryptionMode'] = 'FullBlob'
+        encryption_data = dumps(encryption_data)
+
+    return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
+                 content, start_offset, end_offset, response_headers):
+    '''
+    Decrypts the given blob contents and returns only the requested range.
+
+    :param bool require_encryption:
+        Whether or not the calling blob service requires objects to be decrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :param key_resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted blob content.
+    :rtype: bytes
+    '''
+    try:
+        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+    except:  # pylint: disable=bare-except
+        if require_encryption:
+            raise ValueError(
+                'Encryption required, but received data does not contain appropriate metadata. ' + \
+                'Data was either not encrypted or metadata has been lost.')
+
+        return content
+
+    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    blob_type = response_headers['x-ms-blob-type']
+
+    iv = None
+    unpad = False
+    if 'content-range' in response_headers:
+        content_range = response_headers['content-range']
+        # Format: 'bytes x-y/size'
+
+        # Ignore the word 'bytes'
+        content_range = content_range.split(' ')
+
+        content_range = content_range[1].split('-')
+        content_range = content_range[1].split('/')
+        end_range = int(content_range[0])
+        blob_size = int(content_range[1])
+
+        if start_offset >= 16:
+            iv = content[:16]
+            content = content[16:]
+            start_offset -= 16
+        else:
+            iv = encryption_data.content_encryption_IV
+
+        if end_range == blob_size - 1:
+            unpad = True
+    else:
+        unpad = True
+        iv = encryption_data.content_encryption_IV
+
+    if blob_type == 'PageBlob':
+        unpad = False
+
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+    decryptor = cipher.decryptor()
+
+    content = decryptor.update(content) + decryptor.finalize()
+    if unpad:
+        unpadder = PKCS7(128).unpadder()
+        content = unpadder.update(content) + unpadder.finalize()
+
+    return content[start_offset: len(content) - end_offset]
+
+
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+    encryptor = None
+    padder = None
+
+    if cek is not None and iv is not None:
+        cipher = _generate_AES_CBC_cipher(cek, iv)
+        encryptor = cipher.encryptor()
+        padder = PKCS7(128).padder() if should_pad else None
+
+    return encryptor, padder
+
+
+def encrypt_queue_message(message, key_encryption_key):
+    '''
+    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+    :param object message:
+        The plain text message to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A json-formatted string containing the encrypted message and the encryption metadata.
+    :rtype: str
+    '''
+
+    _validate_not_none('message', message)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = os.urandom(32)
+    initialization_vector = os.urandom(16)
+
+    # Queue encoding functions all return unicode strings, and encryption should
+    # operate on binary strings.
+    message = message.encode('utf-8')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(message) + padder.finalize()
+
+    # Encrypt the data.
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+    # Build the dictionary structure.
+    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+                                                                      content_encryption_key,
+                                                                      initialization_vector)}
+
+    return dumps(queue_message)
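+# Usage sketch (with the same toy KEK as in the earlier sketch; illustrative only,
+# using decrypt_queue_message, which is defined just below):
+#
+#     encrypted = encrypt_queue_message('hello', kek)
+#     # -> JSON string with 'EncryptedMessageContents' and 'EncryptionData'
+#     decrypt_queue_message(encrypted, response=None, require_encryption=True,
+#                           key_encryption_key=kek, resolver=None)
+#     # -> 'hello'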
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+    '''
+    Returns the decrypted message contents from an EncryptedQueueMessage.
+    If no encryption metadata is present, will return the unaltered message.
+    :param str message:
+        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+    :param bool require_encryption:
+        If set, will enforce that the retrieved messages are encrypted and decrypt them.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The plain text message from the queue message.
+    :rtype: str
+    '''
+
+    try:
+        message = loads(message)
+
+        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+    except (KeyError, ValueError):
+        # Message was not json formatted and so was not encrypted
+        # or the user provided a json formatted message.
+        if require_encryption:
+            raise ValueError('Message was not encrypted.')
+
+        return message
+    try:
+        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+    except Exception as error:
+        raise HttpResponseError(
+            message="Decryption failed.",
+            response=response,
+            error=error)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/models.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/models.py
new file mode 100644
index 0000000..0aeb96a
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/models.py
@@ -0,0 +1,468 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-instance-attributes + +from enum import Enum + + +def get_enum_value(value): + if value is None or value in ["None", ""]: + return None + try: + return value.value + except AttributeError: + return value + + +class StorageErrorCode(str, Enum): + + # Generic storage values + account_already_exists = "AccountAlreadyExists" + account_being_created = "AccountBeingCreated" + account_is_disabled = "AccountIsDisabled" + authentication_failed = "AuthenticationFailed" + authorization_failure = "AuthorizationFailure" + no_authentication_information = "NoAuthenticationInformation" + condition_headers_not_supported = "ConditionHeadersNotSupported" + condition_not_met = "ConditionNotMet" + empty_metadata_key = "EmptyMetadataKey" + insufficient_account_permissions = "InsufficientAccountPermissions" + internal_error = "InternalError" + invalid_authentication_info = "InvalidAuthenticationInfo" + invalid_header_value = "InvalidHeaderValue" + invalid_http_verb = "InvalidHttpVerb" + invalid_input = "InvalidInput" + invalid_md5 = "InvalidMd5" + invalid_metadata = "InvalidMetadata" + invalid_query_parameter_value = "InvalidQueryParameterValue" + invalid_range = "InvalidRange" + invalid_resource_name = "InvalidResourceName" + invalid_uri = "InvalidUri" + invalid_xml_document = "InvalidXmlDocument" + invalid_xml_node_value = "InvalidXmlNodeValue" + md5_mismatch = "Md5Mismatch" + metadata_too_large = "MetadataTooLarge" + missing_content_length_header = "MissingContentLengthHeader" + missing_required_query_parameter = "MissingRequiredQueryParameter" + missing_required_header = "MissingRequiredHeader" + missing_required_xml_node = "MissingRequiredXmlNode" + multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" + operation_timed_out = "OperationTimedOut" + out_of_range_input = "OutOfRangeInput" + out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" + request_body_too_large = "RequestBodyTooLarge" + resource_type_mismatch = "ResourceTypeMismatch" + request_url_failed_to_parse = "RequestUrlFailedToParse" + resource_already_exists = "ResourceAlreadyExists" + resource_not_found = "ResourceNotFound" + server_busy = "ServerBusy" + unsupported_header = "UnsupportedHeader" + unsupported_xml_node = "UnsupportedXmlNode" + unsupported_query_parameter = "UnsupportedQueryParameter" + unsupported_http_verb = "UnsupportedHttpVerb" + + # Blob values + append_position_condition_not_met = "AppendPositionConditionNotMet" + blob_already_exists = "BlobAlreadyExists" + blob_not_found = "BlobNotFound" + blob_overwritten = "BlobOverwritten" + blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" + block_count_exceeds_limit = "BlockCountExceedsLimit" + block_list_too_long = "BlockListTooLong" + cannot_change_to_lower_tier = "CannotChangeToLowerTier" + cannot_verify_copy_source = "CannotVerifyCopySource" + container_already_exists = "ContainerAlreadyExists" + container_being_deleted = "ContainerBeingDeleted" + container_disabled = "ContainerDisabled" + container_not_found = "ContainerNotFound" + content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" + copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" + copy_id_mismatch = "CopyIdMismatch" + feature_version_mismatch = "FeatureVersionMismatch" + incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" + incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" + incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" + infinite_lease_duration_required = "InfiniteLeaseDurationRequired" + invalid_blob_or_block = "InvalidBlobOrBlock" + invalid_blob_tier = "InvalidBlobTier" + invalid_blob_type = "InvalidBlobType" + invalid_block_id = "InvalidBlockId" + invalid_block_list = "InvalidBlockList" + invalid_operation = "InvalidOperation" + invalid_page_range = "InvalidPageRange" + invalid_source_blob_type = "InvalidSourceBlobType" + invalid_source_blob_url = "InvalidSourceBlobUrl" + invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" + lease_already_present = "LeaseAlreadyPresent" + lease_already_broken = "LeaseAlreadyBroken" + lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" + lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" + lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" + lease_id_missing = "LeaseIdMissing" + lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" + lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" + lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" + lease_lost = "LeaseLost" + lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" + lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" + lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" + max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" + no_pending_copy_operation = "NoPendingCopyOperation" + operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" + pending_copy_operation = "PendingCopyOperation" + previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" + previous_snapshot_not_found = "PreviousSnapshotNotFound" + previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" + sequence_number_condition_not_met = "SequenceNumberConditionNotMet" + sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" + snapshot_count_exceeded = "SnapshotCountExceeded" + snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" + snapshots_present = "SnapshotsPresent" + source_condition_not_met = "SourceConditionNotMet" + system_in_use = "SystemInUse" + target_condition_not_met = "TargetConditionNotMet" + unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" + blob_being_rehydrated = "BlobBeingRehydrated" + blob_archived = "BlobArchived" + blob_not_archived = "BlobNotArchived" + + # Queue values + invalid_marker = "InvalidMarker" + message_not_found = "MessageNotFound" + message_too_large = "MessageTooLarge" + pop_receipt_mismatch = "PopReceiptMismatch" + queue_already_exists = "QueueAlreadyExists" + queue_being_deleted = "QueueBeingDeleted" + queue_disabled = "QueueDisabled" + queue_not_empty = "QueueNotEmpty" + queue_not_found = "QueueNotFound" + + # File values + cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" + client_cache_flush_delay = "ClientCacheFlushDelay" + delete_pending = "DeletePending" + directory_not_empty = "DirectoryNotEmpty" + file_lock_conflict = "FileLockConflict" + invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" + parent_not_found = "ParentNotFound" + read_only_attribute = "ReadOnlyAttribute" + share_already_exists = "ShareAlreadyExists" + share_being_deleted = "ShareBeingDeleted" + 
share_disabled = "ShareDisabled" + share_not_found = "ShareNotFound" + sharing_violation = "SharingViolation" + share_snapshot_in_progress = "ShareSnapshotInProgress" + share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" + share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" + share_has_snapshots = "ShareHasSnapshots" + container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" + + # DataLake values + content_length_must_be_zero = 'ContentLengthMustBeZero' + path_already_exists = 'PathAlreadyExists' + invalid_flush_position = 'InvalidFlushPosition' + invalid_property_name = 'InvalidPropertyName' + invalid_source_uri = 'InvalidSourceUri' + unsupported_rest_version = 'UnsupportedRestVersion' + file_system_not_found = 'FilesystemNotFound' + path_not_found = 'PathNotFound' + rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' + source_path_not_found = 'SourcePathNotFound' + destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' + file_system_already_exists = 'FilesystemAlreadyExists' + file_system_being_deleted = 'FilesystemBeingDeleted' + invalid_destination_path = 'InvalidDestinationPath' + invalid_rename_source_path = 'InvalidRenameSourcePath' + invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' + lease_is_already_broken = 'LeaseIsAlreadyBroken' + lease_name_mismatch = 'LeaseNameMismatch' + path_conflict = 'PathConflict' + source_path_is_being_deleted = 'SourcePathIsBeingDeleted' + + +class DictMixin(object): + + def __setitem__(self, key, item): + self.__dict__[key] = item + + def __getitem__(self, key): + return self.__dict__[key] + + def __repr__(self): + return str(self) + + def __len__(self): + return len(self.keys()) + + def __delitem__(self, key): + self.__dict__[key] = None + + def __eq__(self, other): + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self): + return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) + + def has_key(self, k): + return k in self.__dict__ + + def update(self, *args, **kwargs): + return self.__dict__.update(*args, **kwargs) + + def keys(self): + return [k for k in self.__dict__ if not k.startswith('_')] + + def values(self): + return [v for k, v in self.__dict__.items() if not k.startswith('_')] + + def items(self): + return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] + + def get(self, key, default=None): + if key in self.__dict__: + return self.__dict__[key] + return default + + +class LocationMode(object): + """ + Specifies the location the request should be sent to. This mode only applies + for RA-GRS accounts which allow secondary read access. All other account types + must use PRIMARY. + """ + + PRIMARY = 'primary' #: Requests should be sent to the primary location. + SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. + + +class ResourceTypes(object): + """ + Specifies the resource types that are accessible with the account SAS. 
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                     ('c' if self.container else '') +
+                     ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service and
+        container you would provide a string "sc".
+
+        :param str string: Specify service, container, or object in
+            the string with the first letter of the word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.blob.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
+
+
+class AccountSasPermissions(object):
+    """
+    Class to be used with the generate_account_sas function and for the
+    AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    perms found here.
+
+    :param bool read:
+        Valid for all signed resource types (Service, Container, and Object).
+        Permits read access to the specified resource type.
+    :param bool write:
+        Valid for all signed resource types (Service, Container, and Object).
+        Permits write access to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+    :param bool delete_previous_version:
+        Delete the previous blob version for the versioning enabled storage account.
+    :param bool list:
+        Valid for Service and Container resource types only.
+    :param bool add:
+        Valid for the following Object resource types only: queue messages, and append blobs.
+    :param bool create:
+        Valid for the following Object resource types only: blobs and files.
+        Users can create new blobs or files, but may not overwrite existing
+        blobs or files.
+    :param bool update:
+        Valid for the following Object resource types only: queue messages.
+    :param bool process:
+        Valid for the following Object resource type only: queue messages.
+    :keyword bool tag:
+        To enable set or get tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable get blobs by tags, this should be used together with list permission.
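+
+    For example (illustrative), ``AccountSasPermissions(read=True, list=True)``
+    serializes to the permission string "rl".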
+    """
+    def __init__(self, read=False, write=False, delete=False,
+                 list=False,  # pylint: disable=redefined-builtin
+                 add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs):
+        self.read = read
+        self.write = write
+        self.delete = delete
+        self.delete_previous_version = delete_previous_version
+        self.list = list
+        self.add = add
+        self.create = create
+        self.update = update
+        self.process = process
+        self.tag = kwargs.pop('tag', False)
+        self.filter_by_tags = kwargs.pop('filter_by_tags', False)
+        self._str = (('r' if self.read else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('x' if self.delete_previous_version else '') +
+                     ('l' if self.list else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('u' if self.update else '') +
+                     ('p' if self.process else '') +
+                     ('f' if self.filter_by_tags else '') +
+                     ('t' if self.tag else '')
+                     )
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create AccountSasPermissions from a string.
+
+        To specify read, write, delete, etc. permissions you need only to
+        include the first letter of the word in the string. E.g. for read and write
+        permissions you would provide a string "rw".
+
+        :param str permission: Specify permissions in
+            the string with the first letter of the word.
+        :return: An AccountSasPermissions object
+        :rtype: ~azure.storage.filedatalake.AccountSasPermissions
+        """
+        p_read = 'r' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_delete_previous_version = 'x' in permission
+        p_list = 'l' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_update = 'u' in permission
+        p_process = 'p' in permission
+        p_tag = 't' in permission
+        p_filter_by_tags = 'f' in permission
+        parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version,
+                     list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag,
+                     filter_by_tags=p_filter_by_tags)
+
+        return parsed
+
+
+class Services(object):
+    """Specifies the services accessible with the account SAS.
+
+    :param bool blob:
+        Access for the `~azure.storage.blob.BlobServiceClient`
+    :param bool queue:
+        Access for the `~azure.storage.queue.QueueServiceClient`
+    :param bool fileshare:
+        Access for the `~azure.storage.fileshare.ShareServiceClient`
+    """
+
+    def __init__(self, blob=False, queue=False, fileshare=False):
+        self.blob = blob
+        self.queue = queue
+        self.fileshare = fileshare
+        self._str = (('b' if self.blob else '') +
+                     ('q' if self.queue else '') +
+                     ('f' if self.fileshare else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create Services from a string.
+
+        To specify blob, queue, or fileshare you need only to
+        include the first letter of the word in the string. E.g. for blob and queue
+        you would provide a string "bq".
+
+        :param str string: Specify blob, queue, or fileshare in
+            the string with the first letter of the word.
+        :return: A Services object
+        :rtype: ~azure.storage.blob.Services
+        """
+        res_blob = 'b' in string
+        res_queue = 'q' in string
+        res_file = 'f' in string
+
+        parsed = cls(res_blob, res_queue, res_file)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
+
+
+class UserDelegationKey(object):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+    :ivar str signed_oid:
+        Object ID of this token.
+    :ivar str signed_tid:
+        Tenant ID of the tenant that issued this token.
+    :ivar str signed_start:
+        The datetime this token becomes valid.
+    :ivar str signed_expiry:
+        The datetime this token expires.
+    :ivar str signed_service:
+        What service this key is valid for.
+    :ivar str signed_version:
+        The version identifier of the REST service that created this token.
+    :ivar str value:
+        The user delegation key.
+    """
+    def __init__(self):
+        self.signed_oid = None
+        self.signed_tid = None
+        self.signed_start = None
+        self.signed_expiry = None
+        self.signed_service = None
+        self.signed_version = None
+        self.value = None
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/parser.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/parser.py
new file mode 100644
index 0000000..c6feba8
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/parser.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+if sys.version_info < (3,):
+    def _str(value):
+        if isinstance(value, unicode):  # pylint: disable=undefined-variable
+            return value.encode('utf-8')
+
+        return str(value)
+else:
+    _str = str
+
+
+def _to_utc_datetime(value):
+    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies.py
new file mode 100644
index 0000000..11fc984
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies.py
@@ -0,0 +1,608 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import re
+import random
+from time import time
+from io import SEEK_SET, UnsupportedOperation
+import logging
+import uuid
+import types
+from typing import Any, TYPE_CHECKING
+from wsgiref.handlers import format_date_time
+try:
+    from urllib.parse import (
+        urlparse,
+        parse_qsl,
+        urlunparse,
+        urlencode,
+    )
+except ImportError:
+    from urllib import urlencode  # type: ignore
+    from urlparse import (  # type: ignore
+        urlparse,
+        parse_qsl,
+        urlunparse,
+    )
+
+from azure.core.pipeline.policies import (
+    HeadersPolicy,
+    SansIOHTTPPolicy,
+    NetworkTraceLoggingPolicy,
+    HTTPPolicy,
+    RequestHistory
+)
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+
+from .models import LocationMode
+
+try:
+    _unicode_type = unicode  # type: ignore
+except NameError:
+    _unicode_type = str
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+    if isinstance(data, _unicode_type):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def is_exhausted(settings):
+    """Are we out of retries?"""
+    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+    retry_counts = list(filter(None, retry_counts))
+    if not retry_counts:
+        return False
+    return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+def is_retry(response, mode):
+    """Is this method/status code retryable? (Based on whitelists and control
+    variables such as the number of total retries to allow, whether to
+    respect the Retry-After header, whether this header is present, and
+    whether the returned status code is on the list of status codes to
+    be retried upon in the presence of the aforementioned header)
+    """
+    status = response.http_response.status_code
+    if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+        if status == 404 and mode == LocationMode.SECONDARY:
+            # Response code 404 should be retried if secondary was used.
+            return True
+        if status == 408:
+            # Response code 408 is a timeout and should be retried.
+            return True
+        return False
+    if status >= 500:
+        # Response codes above 500 with the exception of 501 Not Implemented and
+        # 505 Version Not Supported indicate a server issue and should be retried.
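+        # For example: 500, 502 and 503 are retried, while 501 and 505 fail immediately.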
+ if status in [501, 505]: + return False + return True + return False + + +def urljoin(base_url, stub_url): + parsed = urlparse(base_url) + parsed = parsed._replace(path=parsed.path + '/' + stub_url) + return parsed.geturl() + + +class QueueMessagePolicy(SansIOHTTPPolicy): + + def on_request(self, request): + message_id = request.context.options.pop('queue_message_id', None) + if message_id: + request.http_request.url = urljoin( + request.http_request.url, + message_id) + + +class StorageHeadersPolicy(HeadersPolicy): + request_id_header_name = 'x-ms-client-request-id' + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + super(StorageHeadersPolicy, self).on_request(request) + current_time = format_date_time(time()) + request.http_request.headers['x-ms-date'] = current_time + + custom_id = request.context.options.pop('client_request_id', None) + request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) + + # def on_response(self, request, response): + # # raise exception if the echoed client request id from the service is not identical to the one we sent + # if self.request_id_header_name in response.http_response.headers: + + # client_request_id = request.http_request.headers.get(self.request_id_header_name) + + # if response.http_response.headers[self.request_id_header_name] != client_request_id: + # raise AzureError( + # "Echoed client request ID: {} does not match sent client request ID: {}. " + # "Service request ID: {}".format( + # response.http_response.headers[self.request_id_header_name], client_request_id, + # response.http_response.headers['x-ms-request-id']), + # response=response.http_response + # ) + + +class StorageHosts(SansIOHTTPPolicy): + + def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument + self.hosts = hosts + super(StorageHosts, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + request.context.options['hosts'] = self.hosts + parsed_url = urlparse(request.http_request.url) + + # Detect what location mode we're currently requesting with + location_mode = LocationMode.PRIMARY + for key, value in self.hosts.items(): + if parsed_url.netloc == value: + location_mode = key + + # See if a specific location mode has been specified, and if so, redirect + use_location = request.context.options.pop('use_location', None) + if use_location: + # Lock retries to the specific location + request.context.options['retry_to_secondary'] = False + if use_location not in self.hosts: + raise ValueError("Attempting to use undefined host location {}".format(use_location)) + if use_location != location_mode: + # Update request URL to use the specified location + updated = parsed_url._replace(netloc=self.hosts[use_location]) + request.http_request.url = updated.geturl() + location_mode = use_location + + request.context.options['location_mode'] = location_mode + + +class StorageLoggingPolicy(NetworkTraceLoggingPolicy): + """A policy that logs HTTP request and response to the DEBUG logger. 
+
+    This accepts both global configuration, and per-request level with "enable_http_logger"
+    """
+
+    def on_request(self, request):
+        # type: (PipelineRequest, Any) -> None
+        http_request = request.http_request
+        options = request.context.options
+        if options.pop("logging_enable", self.enable_http_logger):
+            request.context["logging_enable"] = True
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                log_url = http_request.url
+                query_params = http_request.query
+                if 'sig' in query_params:
+                    # redact only the signature value; the URL already contains 'sig='
+                    log_url = log_url.replace(query_params['sig'], "*****")
+                _LOGGER.debug("Request URL: %r", log_url)
+                _LOGGER.debug("Request method: %r", http_request.method)
+                _LOGGER.debug("Request headers:")
+                for header, value in http_request.headers.items():
+                    if header.lower() == 'authorization':
+                        value = '*****'
+                    elif header.lower() == 'x-ms-copy-source' and 'sig' in value:
+                        # take the url apart and scrub away the signed signature
+                        scheme, netloc, path, params, query, fragment = urlparse(value)
+                        parsed_qs = dict(parse_qsl(query))
+                        parsed_qs['sig'] = '*****'
+
+                        # the SAS needs to be put back together
+                        value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment))
+
+                    _LOGGER.debug("    %r: %r", header, value)
+                _LOGGER.debug("Request body:")
+
+                # We don't want to log the binary data of a file upload.
+                if isinstance(http_request.body, types.GeneratorType):
+                    _LOGGER.debug("File upload")
+                else:
+                    _LOGGER.debug(str(http_request.body))
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log request: %r", err)
+
+    def on_response(self, request, response):
+        # type: (PipelineRequest, PipelineResponse, Any) -> None
+        if response.context.pop("logging_enable", self.enable_http_logger):
+            if not _LOGGER.isEnabledFor(logging.DEBUG):
+                return
+
+            try:
+                _LOGGER.debug("Response status: %r", response.http_response.status_code)
+                _LOGGER.debug("Response headers:")
+                for res_header, value in response.http_response.headers.items():
+                    _LOGGER.debug("    %r: %r", res_header, value)
+
+                # We don't want to log binary data if the response is a file.
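+                # A header such as 'Content-Disposition: attachment; filename=report.csv'
+                # (hypothetical value) matches the pattern below; only the filename is logged.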
+                _LOGGER.debug("Response content:")
+                pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE)
+                header = response.http_response.headers.get('content-disposition')
+
+                if header and pattern.match(header):
+                    filename = header.partition('=')[2]
+                    _LOGGER.debug("File attachments: %s", filename)
+                elif response.http_response.headers.get("content-type", "").endswith("octet-stream"):
+                    _LOGGER.debug("Body contains binary data.")
+                elif response.http_response.headers.get("content-type", "").startswith("image"):
+                    _LOGGER.debug("Body contains image data.")
+                else:
+                    if response.context.options.get('stream', False):
+                        _LOGGER.debug("Body is streamable")
+                    else:
+                        _LOGGER.debug(response.http_response.text())
+            except Exception as err:  # pylint: disable=broad-except
+                _LOGGER.debug("Failed to log response: %s", repr(err))
+
+
+class StorageRequestHook(SansIOHTTPPolicy):
+
+    def __init__(self, **kwargs):  # pylint: disable=unused-argument
+        self._request_callback = kwargs.get('raw_request_hook')
+        super(StorageRequestHook, self).__init__()
+
+    def on_request(self, request):
+        # type: (PipelineRequest, **Any) -> PipelineResponse
+        request_callback = request.context.options.pop('raw_request_hook', self._request_callback)
+        if request_callback:
+            request_callback(request)
+
+
+class StorageResponseHook(HTTPPolicy):
+
+    def __init__(self, **kwargs):  # pylint: disable=unused-argument
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(StorageResponseHook, self).__init__()
+
+    def send(self, request):
+        # type: (PipelineRequest) -> PipelineResponse
+        data_stream_total = request.context.get('data_stream_total') or \
+            request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current') or \
+            request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current') or \
+            request.context.options.pop('upload_stream_current', None)
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = self.next.send(request)
+        will_retry = is_retry(response, request.context.options.get('mode'))
+        if not will_retry and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif not will_retry and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            pipeline_obj.context['data_stream_total'] = data_stream_total
+            pipeline_obj.context['download_stream_current'] = download_stream_current
+            pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+
+class StorageContentValidation(SansIOHTTPPolicy):
+    """A policy that calculates and validates Content-MD5 hashes
+    for request and response bodies.
+
+    Validation is enabled per request with the 'validate_content' option.
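+
+    For example (illustrative), an upload issued with validate_content=True gets
+    a 'Content-MD5' header computed from the request body before it is sent.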
+    """
+    header_name = 'Content-MD5'
+
+    def __init__(self, **kwargs):  # pylint: disable=unused-argument
+        super(StorageContentValidation, self).__init__()
+
+    @staticmethod
+    def get_content_md5(data):
+        md5 = hashlib.md5()  # nosec
+        if isinstance(data, bytes):
+            md5.update(data)
+        elif hasattr(data, 'read'):
+            pos = 0
+            try:
+                pos = data.tell()
+            except:  # pylint: disable=bare-except
+                pass
+            for chunk in iter(lambda: data.read(4096), b""):
+                md5.update(chunk)
+            try:
+                data.seek(pos, SEEK_SET)
+            except (AttributeError, IOError):
+                raise ValueError("Data should be bytes or a seekable file-like object.")
+        else:
+            raise ValueError("Data should be bytes or a seekable file-like object.")
+
+        return md5.digest()
+
+    def on_request(self, request):
+        # type: (PipelineRequest, Any) -> None
+        validate_content = request.context.options.pop('validate_content', False)
+        if validate_content and request.http_request.method != 'GET':
+            computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data))
+            request.http_request.headers[self.header_name] = computed_md5
+            request.context['validate_content_md5'] = computed_md5
+        request.context['validate_content'] = validate_content
+
+    def on_response(self, request, response):
+        if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'):
+            computed_md5 = request.context.get('validate_content_md5') or \
+                encode_base64(StorageContentValidation.get_content_md5(response.http_response.body()))
+            if response.http_response.headers['content-md5'] != computed_md5:
+                raise AzureError(
+                    'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format(
+                        response.http_response.headers['content-md5'], computed_md5),
+                    response=response.http_response
+                )
+
+
+class StorageRetryPolicy(HTTPPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    def __init__(self, **kwargs):
+        self.total_retries = kwargs.pop('retry_total', 10)
+        self.connect_retries = kwargs.pop('retry_connect', 3)
+        self.read_retries = kwargs.pop('retry_read', 3)
+        self.status_retries = kwargs.pop('retry_status', 3)
+        self.retry_to_secondary = kwargs.pop('retry_to_secondary', False)
+        super(StorageRetryPolicy, self).__init__()
+
+    def _set_next_host_location(self, settings, request):  # pylint: disable=no-self-use
+        """
+        A function which sets the next host location on the request, if applicable.
+
+        :param dict settings:
+            The current retry settings, containing the location mode and the
+            primary/secondary hosts to alternate between.
+        :param request:
+            The request to evaluate and possibly modify.
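+
+        For example, a request that failed against the primary endpoint of an
+        RA-GRS account is pointed at the secondary host on the next attempt.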
+        """
+        if settings['hosts'] and all(settings['hosts'].values()):
+            url = urlparse(request.url)
+            # If there's more than one possible location, retry to the alternative
+            if settings['mode'] == LocationMode.PRIMARY:
+                settings['mode'] = LocationMode.SECONDARY
+            else:
+                settings['mode'] = LocationMode.PRIMARY
+            updated = url._replace(netloc=settings['hosts'].get(settings['mode']))
+            request.url = updated.geturl()
+
+    def configure_retries(self, request):  # pylint: disable=no-self-use
+        body_position = None
+        if hasattr(request.http_request.body, 'read'):
+            try:
+                body_position = request.http_request.body.tell()
+            except (AttributeError, UnsupportedOperation):
+                # if body position cannot be obtained, then retries will not work
+                pass
+        options = request.context.options
+        return {
+            'total': options.pop("retry_total", self.total_retries),
+            'connect': options.pop("retry_connect", self.connect_retries),
+            'read': options.pop("retry_read", self.read_retries),
+            'status': options.pop("retry_status", self.status_retries),
+            'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary),
+            'mode': options.pop("location_mode", LocationMode.PRIMARY),
+            'hosts': options.pop("hosts", None),
+            'hook': options.pop("retry_hook", None),
+            'body_position': body_position,
+            'count': 0,
+            'history': []
+        }
+
+    def get_backoff_time(self, settings):  # pylint: disable=unused-argument,no-self-use
+        """ Formula for computing the current backoff.
+        Should be calculated by child class.
+
+        :rtype: float
+        """
+        return 0
+
+    def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        transport.sleep(backoff)
+
+    def increment(self, settings, request, response=None, error=None):
+        """Increment the retry counters.
+
+        :param response: A pipeline response object.
+        :param error: An error encountered during the request, or
+            None if the response was received successfully.
+
+        :return: Whether another retry attempt is available.
+        """
+        settings['total'] -= 1
+
+        if error and isinstance(error, ServiceRequestError):
+            # Errors when we're fairly sure that the server did not receive the
+            # request, so it should be safe to retry.
+            settings['connect'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        elif error and isinstance(error, ServiceResponseError):
+            # Errors that occur after the request has been started, so we should
+            # assume that the server began processing it.
+            settings['read'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+            if response:
+                settings['status'] -= 1
+                settings['history'].append(RequestHistory(request, http_response=response))
+
+        if not is_exhausted(settings):
+            if request.method not in ['PUT'] and settings['retry_secondary']:
+                self._set_next_host_location(settings, request)
+
+            # rewind the request body if it is a stream
+            if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, retries will not work
+                if settings['body_position'] is None:
+                    return False
+                try:
+                    # attempt to rewind the body to the initial position
+                    request.body.seek(settings['body_position'], SEEK_SET)
+                except (UnsupportedOperation, ValueError):
+                    # if the body is not seekable, retries will not work
+                    return False
+            settings['count'] += 1
+            return True
+        return False
+
+    def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(StorageRetryPolicy):
+    """Exponential retry."""
+
+    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        '''
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
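+
+        For example (illustrative), with the defaults a retry with count 1 has a
+        base backoff of 15 + 3^1 = 18 seconds and, after jitter, sleeps for a
+        value drawn uniformly from [15, 21] seconds.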
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies_async.py
new file mode 100644
index 0000000..e0926b8
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/policies_async.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+import asyncio
+import random
+import logging
+from typing import Any, TYPE_CHECKING
+
+from azure.core.pipeline.policies import AsyncHTTPPolicy
+from azure.core.exceptions import AzureError
+
+from .policies import is_retry, StorageRetryPolicy
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+async def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        # iscoroutinefunction detects 'async def' hooks so they are awaited;
+        # plain callables are invoked directly.
+        if asyncio.iscoroutinefunction(settings['hook']):
+            await settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+        else:
+            settings['hook'](
+                retry_count=settings['count'] - 1,
+                location_mode=settings['mode'],
+                **kwargs)
+
+
+class AsyncStorageResponseHook(AsyncHTTPPolicy):
+
+    def __init__(self, **kwargs):  # pylint: disable=unused-argument
+        self._response_callback = kwargs.get('raw_response_hook')
+        super(AsyncStorageResponseHook, self).__init__()
+
+    async def send(self, request):
+        # type: (PipelineRequest) -> PipelineResponse
+        data_stream_total = request.context.get('data_stream_total') or \
+            request.context.options.pop('data_stream_total', None)
+        download_stream_current = request.context.get('download_stream_current') or \
+            request.context.options.pop('download_stream_current', None)
+        upload_stream_current = request.context.get('upload_stream_current') or \
+            request.context.options.pop('upload_stream_current', None)
+        response_callback = request.context.get('response_callback') or \
+            request.context.options.pop('raw_response_hook', self._response_callback)
+
+        response = await self.next.send(request)
+        await response.http_response.load_body()
+
+        will_retry = is_retry(response, request.context.options.get('mode'))
+        if not will_retry and download_stream_current is not None:
+            download_stream_current += int(response.http_response.headers.get('Content-Length', 0))
+            if data_stream_total is None:
+                content_range = response.http_response.headers.get('Content-Range')
+                if content_range:
+                    data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1])
+                else:
+                    data_stream_total = download_stream_current
+        elif not will_retry and upload_stream_current is not None:
+            upload_stream_current += int(response.http_request.headers.get('Content-Length', 0))
+        for pipeline_obj in [request, response]:
+            pipeline_obj.context['data_stream_total'] = data_stream_total
+            pipeline_obj.context['download_stream_current'] = download_stream_current
+            pipeline_obj.context['upload_stream_current'] = upload_stream_current
+        if response_callback:
+            if asyncio.iscoroutinefunction(response_callback):
+                await response_callback(response)
+            else:
+                response_callback(response)
+            request.context['response_callback'] = response_callback
+        return response
+
+
+class AsyncStorageRetryPolicy(StorageRetryPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    async def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        await transport.sleep(backoff)
+
+    async def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = await self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        await retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        await self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    await retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    await self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(AsyncStorageRetryPolicy):
+    """Exponential retry."""
+
+    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        '''
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 causes the back-off interval x to vary between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/request_handlers.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/request_handlers.py
new file mode 100644
index 0000000..37354d7
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/request_handlers.py
@@ -0,0 +1,273 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+
+import logging
+from os import fstat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+
+import isodate
+
+from azure.core.exceptions import raise_with_traceback
+
+
+_LOGGER = logging.getLogger(__name__)
+
+_REQUEST_DELIMITER_PREFIX = "batch_"
+_HTTP1_1_IDENTIFIER = "HTTP/1.1"
+_HTTP_LINE_ENDING = "\r\n"
+
+
+def serialize_iso(attr):
+    """Serialize Datetime object into ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if format invalid.
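+
+    Example (illustrative)::
+
+        from datetime import datetime
+        serialize_iso(datetime(2021, 1, 1, 12, 30))  # -> '2021-01-01T12:30:00Z'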
+ """ + if not attr: + return None + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, + utc.tm_hour, utc.tm_min, utc.tm_sec) + return date + 'Z' + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(ValueError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + +def get_length(data): + length = None + # Check if object implements the __len__ method, covers most input cases such as bytearray. + try: + length = len(data) + except: # pylint: disable=bare-except + pass + + if not length: + # Check if the stream is a file-like stream object. + # If so, calculate the size using the file descriptor. + try: + fileno = data.fileno() + except (AttributeError, UnsupportedOperation): + pass + else: + try: + return fstat(fileno).st_size + except OSError: + # Not a valid fileno, may be possible requests returned + # a socket number? + pass + + # If the stream is seekable and tell() is implemented, calculate the stream size. + try: + current_position = data.tell() + data.seek(0, SEEK_END) + length = data.tell() - current_position + data.seek(current_position, SEEK_SET) + except (AttributeError, UnsupportedOperation): + pass + + return length + + +def read_length(data): + try: + if hasattr(data, 'read'): + read_data = b'' + for chunk in iter(lambda: data.read(4096), b""): + read_data += chunk + return len(read_data), read_data + if hasattr(data, '__iter__'): + read_data = b'' + for chunk in data: + read_data += chunk + return len(read_data), read_data + except: # pylint: disable=bare-except + pass + raise ValueError("Unable to calculate content length, please specify.") + + +def validate_and_format_range_headers( + start_range, end_range, start_range_required=True, + end_range_required=True, check_content_md5=False, align_to_page=False): + # If end range is provided, start range must be provided + if (start_range_required or end_range is not None) and start_range is None: + raise ValueError("start_range value cannot be None.") + if end_range_required and end_range is None: + raise ValueError("end_range value cannot be None.") + + # Page ranges must be 512 aligned + if align_to_page: + if start_range is not None and start_range % 512 != 0: + raise ValueError("Invalid page blob start_range: {0}. " + "The size must be aligned to a 512-byte boundary.".format(start_range)) + if end_range is not None and end_range % 512 != 511: + raise ValueError("Invalid page blob end_range: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(end_range)) + + # Format based on whether end_range is present + range_header = None + if end_range is not None: + range_header = 'bytes={0}-{1}'.format(start_range, end_range) + elif start_range is not None: + range_header = "bytes={0}-".format(start_range) + + # Content MD5 can only be provided for a complete range less than 4MB in size + range_validation = None + if check_content_md5: + if start_range is None or end_range is None: + raise ValueError("Both start and end range requied for MD5 content validation.") + if end_range - start_range > 4 * 1024 * 1024: + raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") + range_validation = 'true' + + return range_header, range_validation + + +def add_metadata_headers(metadata=None): + # type: (Optional[Dict[str, str]]) -> Dict[str, str] + headers = {} + if metadata: + for key, value in metadata.items(): + headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value + return headers + + +def serialize_batch_body(requests, batch_id): + """ + -- + + -- + (repeated as needed) + ---- + + Serializes the requests in this batch to a single HTTP mixed/multipart body. + + :param list[~azure.core.pipeline.transport.HttpRequest] requests: + a list of sub-request for the batch request + :param str batch_id: + to be embedded in batch sub-request delimiter + :return: The body bytes for this batch. + """ + + if requests is None or len(requests) == 0: + raise ValueError('Please provide sub-request(s) for this batch request') + + delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') + newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') + batch_body = list() + + content_index = 0 + for request in requests: + request.headers.update({ + "Content-ID": str(content_index), + "Content-Length": str(0) + }) + batch_body.append(delimiter_bytes) + batch_body.append(_make_body_from_sub_request(request)) + batch_body.append(newline_bytes) + content_index += 1 + + batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) + # final line of body MUST have \r\n at the end, or it will not be properly read by the service + batch_body.append(newline_bytes) + + return bytes().join(batch_body) + + +def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): + """ + Gets the delimiter used for this batch request's mixed/multipart HTTP format. + + :param str batch_id: + Randomly generated id + :param bool is_prepend_dashes: + Whether to include the starting dashes. Used in the body, but non on defining the delimiter. + :param bool is_append_dashes: + Whether to include the ending dashes. Used in the body on the closing delimiter only. + :return: The delimiter, WITHOUT a trailing newline. + """ + + prepend_dashes = '--' if is_prepend_dashes else '' + append_dashes = '--' if is_append_dashes else '' + + return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes + + +def _make_body_from_sub_request(sub_request): + """ + Content-Type: application/http + Content-ID: + Content-Transfer-Encoding: (if present) + + HTTP/ +
+    <header key>: <header value> (repeated as necessary)
+    Content-Length: <value>
+    (newline if content length > 0)
+    <body> (if content length > 0)
+
+    Serializes an HTTP request.
+
+    :param ~azure.core.pipeline.transport.HttpRequest sub_request:
+        Request to serialize.
+    :return: The serialized sub-request in bytes
+    """
+
+    # put the sub-request's headers into a list for efficient str concatenation
+    sub_request_body = list()
+
+    # get headers for ease of manipulation; remove headers as they are used
+    headers = sub_request.headers
+
+    # append opening headers
+    sub_request_body.append("Content-Type: application/http")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-ID: ")
+    sub_request_body.append(headers.pop("Content-ID", ""))
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-Transfer-Encoding: binary")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append HTTP verb and path and query and HTTP version
+    sub_request_body.append(sub_request.method)
+    sub_request_body.append(' ')
+    sub_request_body.append(sub_request.url)
+    sub_request_body.append(' ')
+    sub_request_body.append(_HTTP1_1_IDENTIFIER)
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append remaining headers (this will set the Content-Length, as it was set on `sub-request`)
+    for header_name, header_value in headers.items():
+        if header_value is not None:
+            sub_request_body.append(header_name)
+            sub_request_body.append(": ")
+            sub_request_body.append(header_value)
+            sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    return ''.join(sub_request_body).encode()
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/response_handlers.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/response_handlers.py
new file mode 100644
index 0000000..32a923f
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/response_handlers.py
@@ -0,0 +1,192 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+import logging
+from xml.etree.ElementTree import Element
+
+from azure.core.pipeline.policies import ContentDecodePolicy
+from azure.core.exceptions import (
+    HttpResponseError,
+    ResourceNotFoundError,
+    ResourceModifiedError,
+    ResourceExistsError,
+    ClientAuthenticationError,
+    DecodeError)
+
+from .parser import _to_utc_datetime
+from .models import StorageErrorCode, UserDelegationKey, get_enum_value
+
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from azure.core.exceptions import AzureError
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class PartialBatchErrorException(HttpResponseError):
+    """There is a partial failure in batch operations.
+
+    :param str message: The message of the exception.
+    :param response: Server response to be deserialized.
+    :param list parts: A list of the parts in multipart response.
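+
+    Example (an illustrative sketch; ``batch_client`` and its batch call are
+    hypothetical placeholders, not part of this module)::
+
+        try:
+            batch_client.some_batch_operation()
+        except PartialBatchErrorException as error:
+            for part in error.parts:  # one pipeline response per sub-request
+                print(part.status_code)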
+ """ + + def __init__(self, message, response, parts): + self.parts = parts + super(PartialBatchErrorException, self).__init__(message=message, response=response) + + +def parse_length_from_content_range(content_range): + ''' + Parses the blob length from the content range header: bytes 1-3/65537 + ''' + if content_range is None: + return None + + # First, split in space and take the second half: '1-3/65537' + # Next, split on slash and take the second half: '65537' + # Finally, convert to an int: 65537 + return int(content_range.split(' ', 1)[1].split('/', 1)[1]) + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = get_enum_value(value) + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers) + + +def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers), deserialized + + +def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return response.http_response.location_mode, deserialized + + +def process_storage_error(storage_error): # pylint:disable=too-many-statements + raise_error = HttpResponseError + serialized = False + if not storage_error.response: + raise storage_error + # If it is one of those three then it has been serialized prior by the generated layer. 
+    if isinstance(storage_error, (PartialBatchErrorException,
+                                  ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)):
+        serialized = True
+    error_code = storage_error.response.headers.get('x-ms-error-code')
+    error_message = storage_error.message
+    additional_data = {}
+    error_dict = {}
+    try:
+        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+        # If it is an XML response
+        if isinstance(error_body, Element):
+            error_dict = {
+                child.tag.lower(): child.text
+                for child in error_body
+            }
+        # If it is a JSON response
+        elif isinstance(error_body, dict):
+            error_dict = error_body.get('error', {})
+        elif not error_code:
+            _LOGGER.warning(
+                'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.',
+                type(error_body))
+            error_dict = {'message': str(error_body)}
+
+        # If we extracted from a JSON or XML response
+        if error_dict:
+            error_code = error_dict.get('code')
+            error_message = error_dict.get('message')
+            additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}}
+    except DecodeError:
+        pass
+
+    try:
+        # This check would be unnecessary if we have already serialized the error
+        if error_code and not serialized:
+            error_code = StorageErrorCode(error_code)
+            if error_code in [StorageErrorCode.condition_not_met,
+                              StorageErrorCode.blob_overwritten]:
+                raise_error = ResourceModifiedError
+            if error_code in [StorageErrorCode.invalid_authentication_info,
+                              StorageErrorCode.authentication_failed]:
+                raise_error = ClientAuthenticationError
+            if error_code in [StorageErrorCode.resource_not_found,
+                              StorageErrorCode.cannot_verify_copy_source,
+                              StorageErrorCode.blob_not_found,
+                              StorageErrorCode.queue_not_found,
+                              StorageErrorCode.container_not_found,
+                              StorageErrorCode.parent_not_found,
+                              StorageErrorCode.share_not_found]:
+                raise_error = ResourceNotFoundError
+            if error_code in [StorageErrorCode.account_already_exists,
+                              StorageErrorCode.account_being_created,
+                              StorageErrorCode.resource_already_exists,
+                              StorageErrorCode.resource_type_mismatch,
+                              StorageErrorCode.blob_already_exists,
+                              StorageErrorCode.queue_already_exists,
+                              StorageErrorCode.container_already_exists,
+                              StorageErrorCode.container_being_deleted,
+                              StorageErrorCode.queue_being_deleted,
+                              StorageErrorCode.share_already_exists,
+                              StorageErrorCode.share_being_deleted]:
+                raise_error = ResourceExistsError
+    except ValueError:
+        # Got an unknown error code
+        pass
+
+    # Error message should include all the error properties
+    try:
+        error_message += "\nErrorCode:{}".format(error_code.value)
+    except AttributeError:
+        error_message += "\nErrorCode:{}".format(error_code)
+    for name, info in additional_data.items():
+        error_message += "\n{}:{}".format(name, info)
+
+    # No need to create an instance if it has already been serialized by the generated layer
+    if serialized:
+        storage_error.message = error_message
+        error = storage_error
+    else:
+        error = raise_error(message=error_message, response=storage_error.response)
+    # Ensure these properties are stored in the error instance as well (not just the error message)
+    error.error_code = error_code
+    error.additional_info = additional_data
+    # error.args is what's surfaced on the traceback - show error message in all cases
+    error.args = (error.message,)
+    try:
+        # `from None` prevents us from double printing the exception (suppresses generated layer error context)
+        exec("raise error from None")  # pylint: disable=exec-used # nosec
+    except SyntaxError:
+        raise error
+
+
+def 
parse_to_internal_user_delegation_key(service_user_delegation_key): + internal_user_delegation_key = UserDelegationKey() + internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid + internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid + internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) + internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) + internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service + internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version + internal_user_delegation_key.value = service_user_delegation_key.value + return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/shared_access_signature.py new file mode 100644 index 0000000..07aad5f --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/shared_access_signature.py @@ -0,0 +1,220 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import date + +from .parser import _str, _to_utc_datetime +from .constants import X_MS_VERSION +from . import sign_string, url_quote + + +class QueryStringConstants(object): + SIGNED_SIGNATURE = 'sig' + SIGNED_PERMISSION = 'sp' + SIGNED_START = 'st' + SIGNED_EXPIRY = 'se' + SIGNED_RESOURCE = 'sr' + SIGNED_IDENTIFIER = 'si' + SIGNED_IP = 'sip' + SIGNED_PROTOCOL = 'spr' + SIGNED_VERSION = 'sv' + SIGNED_CACHE_CONTROL = 'rscc' + SIGNED_CONTENT_DISPOSITION = 'rscd' + SIGNED_CONTENT_ENCODING = 'rsce' + SIGNED_CONTENT_LANGUAGE = 'rscl' + SIGNED_CONTENT_TYPE = 'rsct' + START_PK = 'spk' + START_RK = 'srk' + END_PK = 'epk' + END_RK = 'erk' + SIGNED_RESOURCE_TYPES = 'srt' + SIGNED_SERVICES = 'ss' + SIGNED_OID = 'skoid' + SIGNED_TID = 'sktid' + SIGNED_KEY_START = 'skt' + SIGNED_KEY_EXPIRY = 'ske' + SIGNED_KEY_SERVICE = 'sks' + SIGNED_KEY_VERSION = 'skv' + + # for ADLS + SIGNED_AUTHORIZED_OID = 'saoid' + SIGNED_UNAUTHORIZED_OID = 'suoid' + SIGNED_CORRELATION_ID = 'scid' + SIGNED_DIRECTORY_DEPTH = 'sdd' + + @staticmethod + def to_list(): + return [ + QueryStringConstants.SIGNED_SIGNATURE, + QueryStringConstants.SIGNED_PERMISSION, + QueryStringConstants.SIGNED_START, + QueryStringConstants.SIGNED_EXPIRY, + QueryStringConstants.SIGNED_RESOURCE, + QueryStringConstants.SIGNED_IDENTIFIER, + QueryStringConstants.SIGNED_IP, + QueryStringConstants.SIGNED_PROTOCOL, + QueryStringConstants.SIGNED_VERSION, + QueryStringConstants.SIGNED_CACHE_CONTROL, + QueryStringConstants.SIGNED_CONTENT_DISPOSITION, + QueryStringConstants.SIGNED_CONTENT_ENCODING, + QueryStringConstants.SIGNED_CONTENT_LANGUAGE, + QueryStringConstants.SIGNED_CONTENT_TYPE, + QueryStringConstants.START_PK, + QueryStringConstants.START_RK, + QueryStringConstants.END_PK, + QueryStringConstants.END_RK, + QueryStringConstants.SIGNED_RESOURCE_TYPES, + QueryStringConstants.SIGNED_SERVICES, + QueryStringConstants.SIGNED_OID, + QueryStringConstants.SIGNED_TID, + QueryStringConstants.SIGNED_KEY_START, + QueryStringConstants.SIGNED_KEY_EXPIRY, + QueryStringConstants.SIGNED_KEY_SERVICE, + 
QueryStringConstants.SIGNED_KEY_VERSION,
+            # for ADLS
+            QueryStringConstants.SIGNED_AUTHORIZED_OID,
+            QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+            QueryStringConstants.SIGNED_CORRELATION_ID,
+            QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(self, services, resource_types, permission, expiry, start=None,
+                         ip=None, protocol=None):
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature with the sas_token parameter of the service
+        or to create a new account object.
+
+        :param services:
+            Specifies the services accessible with the account SAS. You can
+            combine values to provide access to more than one service.
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy. You can combine
+            values to provide more than one permission.
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. Azure will always convert values
+            to UTC. If a date is passed in without timezone info, it is assumed to
+            be UTC.
+        :type start: datetime or str
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
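+
+        Example (illustrative values only; '<account-key>' stands in for a real
+        base64-encoded account key)::
+
+            sas = SharedAccessSignature('myaccount', '<account-key>')
+            token = sas.generate_account(
+                services='b', resource_types='sco',
+                permission='rl', expiry='2021-01-01T00:00:00Z')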
+ ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _str(val) if val is not None else None + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(QueryStringConstants.SIGNED_START, start) + self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(QueryStringConstants.SIGNED_IP, ip) + self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) + + def add_id(self, policy_id): + self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) + + def add_account(self, services, resource_types): + self._add_query(QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) + self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION)) + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + def get_token(self): + return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads.py new file mode 100644 index 0000000..1b619df --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads.py @@ -0,0 +1,602 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from concurrent import futures +from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) +from threading import Lock +from itertools import islice +from math import ceil + +import six + +from azure.core.tracing.common import with_current_context + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." + + +def _parallel_uploads(executor, uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(executor.submit(with_current_context(uploader), next_chunk)) + except StopIteration: + break + + # Wait for the remaining uploads to finish + done, _running = futures.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + validate_content=None, + encryption_options=None, + **kwargs): + + if encryption_options: + encryptor, padder = get_blob_encryptor_and_padder( + encryption_options.get('cek'), + encryption_options.get('vector'), + uploader_class is not PageBlobChunkUploader) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + validate_content=validate_content, + **kwargs) + if parallel: + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + executor.submit(with_current_context(uploader.process_chunk), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + executor.submit(with_current_context(uploader.process_substream_block), u) + for u in 
islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] + if any(range_ids): + return sorted(range_ids) + return [] + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b"" + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError("Blob data should be of type bytes.") + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b"" or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + def _update_progress(self, length): + if self.progress_lock is not None: + with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = self._upload_chunk(chunk_offset, chunk_data) + self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + def process_substream_block(self, block_data): + return self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + def 
_upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_substream_block_with_progress(self, index, block_stream): + range_id = self._upload_substream_block(index, block_stream) + self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop("modified_access_conditions", None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return index, block_id + + def _upload_substream_block(self, index, block_stream): + try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) + self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + return not any(bytearray(chunk_data)) + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + self.current_length = int(self.response_headers["blob_append_offset"]) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + 
**self.request_options
+        )
+
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class DataLakeFileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # unlike page blobs, every chunk is appended, including all-zero ones
+        self.response_headers = self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    def _upload_substream_block(self, index, block_stream):
+        try:
+            self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+
+
+class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method
+
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        length = len(chunk_data)
+        chunk_end = chunk_offset + length - 1
+        response = self.service.upload_range(
+            chunk_data,
+            chunk_offset,
+            length,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+        return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
+
+    # TODO: Implement this method.
+    def _upload_substream_block(self, index, block_stream):
+        pass
+
+
+class SubStream(IOBase):
+
+    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
+        # Python 2.7: file-like objects created with open() typically support seek(), but are not
+        # derivations of io.IOBase and thus do not implement seekable().
+        # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
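+        # Note: seek() support is therefore probed below with a no-op
+        # seek(0, SEEK_CUR) rather than via seekable(), which older file-like
+        # objects may not implement; unseekable inputs (sockets, generators)
+        # raise and are rejected with a clear error.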
+ try: + # only the main thread runs this, so there's no need grabbing the lock + wrapped_stream.seek(0, SEEK_CUR) + except: + raise ValueError("Wrapped stream must support seek().") + + self._lock = lockObj + self._wrapped_stream = wrapped_stream + self._position = 0 + self._stream_begin_index = stream_begin_index + self._length = length + self._buffer = BytesIO() + + # we must avoid buffering more than necessary, and also not use up too much memory + # so the max buffer size is capped at 4MB + self._max_buffer_size = ( + length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE + ) + self._current_buffer_start = 0 + self._current_buffer_size = 0 + super(SubStream, self).__init__() + + def __len__(self): + return self._length + + def close(self): + if self._buffer: + self._buffer.close() + self._wrapped_stream = None + IOBase.close(self) + + def fileno(self): + return self._wrapped_stream.fileno() + + def flush(self): + pass + + def read(self, size=None): + if self.closed: # pylint: disable=using-constant-test + raise ValueError("Stream is closed.") + + if size is None: + size = self._length - self._position + + # adjust if out of bounds + if size + self._position >= self._length: + size = self._length - self._position + + # return fast + if size == 0 or self._buffer.closed: + return b"" + + # attempt first read from the read buffer and update position + read_buffer = self._buffer.read(size) + bytes_read = len(read_buffer) + bytes_remaining = size - bytes_read + self._position += bytes_read + + # repopulate the read buffer from the underlying stream to fulfill the request + # ensure the seek and read operations are done atomically (only if a lock is provided) + if bytes_remaining > 0: + with self._buffer: + # either read in the max buffer size specified on the class + # or read in just enough data for the current block/sub stream + current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) + + # lock is only defined if max_concurrency > 1 (parallel uploads) + if self._lock: + with self._lock: + # reposition the underlying stream to match the start of the data to read + absolute_position = self._stream_begin_index + self._position + self._wrapped_stream.seek(absolute_position, SEEK_SET) + # If we can't seek to the right location, our read will be corrupted so fail fast. + if self._wrapped_stream.tell() != absolute_position: + raise IOError("Stream failed to seek to the desired location.") + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + else: + absolute_position = self._stream_begin_index + self._position + # It's possible that there's connection problem during data transfer, + # so when we retry we don't want to read from current position of wrapped stream, + # instead we should seek to where we want to read from. 
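+                    # Checking tell() first avoids a redundant seek() on the
+                    # common, non-retry path.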
+                    if self._wrapped_stream.tell() != absolute_position:
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+
+                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+            if buffer_from_stream:
+                # update the buffer with new data from the wrapped stream
+                # we need to note down the start position and size of the buffer, in case seek is performed later
+                self._buffer = BytesIO(buffer_from_stream)
+                self._current_buffer_start = self._position
+                self._current_buffer_size = len(buffer_from_stream)
+
+                # read the remaining bytes from the new buffer and update position
+                second_read_buffer = self._buffer.read(bytes_remaining)
+                read_buffer += second_read_buffer
+                self._position += len(second_read_buffer)
+
+        return read_buffer
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        raise UnsupportedOperation
+
+    def seek(self, offset, whence=0):
+        if whence is SEEK_SET:
+            start_index = 0
+        elif whence is SEEK_CUR:
+            start_index = self._position
+        elif whence is SEEK_END:
+            start_index = self._length
+            offset = -offset
+        else:
+            raise ValueError("Invalid argument for the 'whence' parameter.")
+
+        pos = start_index + offset
+
+        if pos > self._length:
+            pos = self._length
+        elif pos < 0:
+            pos = 0
+
+        # check if buffer is still valid
+        # if not, drop buffer
+        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+            self._buffer.close()
+            self._buffer = BytesIO()
+        else:  # if yes seek to correct position
+            delta = pos - self._current_buffer_start
+            self._buffer.seek(delta, SEEK_SET)
+
+        self._position = pos
+        return pos
+
+    def seekable(self):
+        return True
+
+    def tell(self):
+        return self._position
+
+    def write(self):
+        raise UnsupportedOperation
+
+    def writelines(self):
+        raise UnsupportedOperation
+
+    def writable(self):
+        return False
+
+
+class IterStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator, encoding="UTF-8"):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self.leftover = b""
+        self.encoding = encoding
+
+    def __len__(self):
+        return self.generator.__len__()
+
+    def __iter__(self):
+        return self.iterator
+
+    def seekable(self):
+        return False
+
+    def __next__(self):
+        return next(self.iterator)
+
+    next = __next__  # Python 2 compatibility.
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is unseekable.")
+
+    def read(self, size):
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = self.__next__()
+                if isinstance(chunk, six.text_type):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        except StopIteration:
+            pass
+
+        if count > size:
+            self.leftover = data[size:]
+
+        return data[:size]
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads_async.py
new file mode 100644
index 0000000..5ed192b
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared/uploads_async.py
@@ -0,0 +1,395 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +import asyncio +from asyncio import Lock +from itertools import islice +import threading + +from math import ceil + +import six + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .encryption import get_blob_encryptor_and_padder +from .uploads import SubStream, IterStreamer # pylint: disable=unused-import + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' + + +async def _parallel_uploads(uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(asyncio.ensure_future(uploader(next_chunk))) + except StopIteration: + break + + # Wait for the remaining uploads to finish + if running: + done, _running = await asyncio.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +async def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + encryption_options=None, + **kwargs): + + if encryption_options: + encryptor, padder = get_blob_encryptor_and_padder( + encryption_options.get('cek'), + encryption_options.get('vector'), + uploader_class is not PageBlobChunkUploader) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + asyncio.ensure_future(uploader.process_chunk(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [] + for chunk in uploader.get_chunk_streams(): + range_ids.append(await uploader.process_chunk(chunk)) + + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +async def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + **kwargs) + + if parallel: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + asyncio.ensure_future(uploader.process_substream_block(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [] + for block in uploader.get_substream_blocks(): + range_ids.append(await 
uploader.process_substream_block(block)) + if any(range_ids): + return sorted(range_ids) + return + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = threading.Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b'' + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b'' or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + async def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + async def _update_progress(self, length): + if self.progress_lock is not None: + async with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + async def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = await self._upload_chunk(chunk_offset, chunk_data) + await self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + async def process_substream_block(self, block_data): + return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + async def _upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + async def 
_upload_substream_block_with_progress(self, index, block_stream): + range_id = await self._upload_substream_block(index, block_stream) + await self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop('modified_access_conditions', None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + await self.service.stage_block( + block_id, + len(chunk_data), + body=chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + return index, block_id + + async def _upload_substream_block(self, index, block_stream): + try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) + await self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + for each_byte in chunk_data: + if each_byte not in [0, b'\x00']: + return False + return True + + async def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = await self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + async def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + self.current_length = int(self.response_headers['blob_append_offset']) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + 
**self.request_options) + + async def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + self.response_headers = await self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + async def _upload_substream_block(self, index, block_stream): + try: + await self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = await self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + return range_id, response + + # TODO: Implement this method. + async def _upload_substream_block(self, index, block_stream): + pass diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared_access_signature.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared_access_signature.py new file mode 100644 index 0000000..ba27198 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_shared_access_signature.py @@ -0,0 +1,391 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING + +from azure.multiapi.storagev2.blob.v2020_06_12 import generate_account_sas as generate_blob_account_sas +from azure.multiapi.storagev2.blob.v2020_06_12 import generate_container_sas, generate_blob_sas +if TYPE_CHECKING: + import datetime + from ._models import AccountSasPermissions, FileSystemSasPermissions, FileSasPermissions, ResourceTypes, \ + UserDelegationKey + + +def generate_account_sas( + account_name, # type: str + account_key, # type: str + resource_types, # type: Union[ResourceTypes, str] + permission, # type: Union[AccountSasPermissions, str] + expiry, # type: Optional[Union[datetime, str]] + **kwargs # type: Any + ): # type: (...) -> str + """Generates a shared access signature for the DataLake service. + + Use the returned signature as the credential parameter of any DataLakeServiceClient, + FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str account_key: + The access key to generate the shared access signature. + :param resource_types: + Specifies the resource types that are accessible with the account SAS. 
+ :type resource_types: str or ~azure.storage.filedatalake.ResourceTypes + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.filedatalake.AccountSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :keyword start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :paramtype start: ~datetime.datetime or str + :keyword str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + return generate_blob_account_sas( + account_name=account_name, + account_key=account_key, + resource_types=resource_types, + permission=permission, + expiry=expiry, + **kwargs + ) + + +def generate_file_system_sas( + account_name, # type: str + file_system_name, # type: str + credential, # type: Union[str, UserDelegationKey] + permission=None, # type: Optional[Union[FileSystemSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for a file system. + + Use the returned signature with the credential parameter of any DataLakeServiceClient, + FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str file_system_name: + The name of the file system. + :param str credential: + Credential could be either account key or user delegation key. + If use account key is used as credential, then the credential type should be a str. + Instead of an account key, the user could also pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished + by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`. + When present, the SAS is signed with the user delegation key instead. + :type credential: str or ~azure.storage.filedatalake.UserDelegationKey + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. 
+ Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :keyword start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :paramtype start: datetime or str + :keyword str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str preauthorized_agent_object_id: + The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform + the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the + user delegation key has the required permissions before granting access but no additional permission check for + the agent object id will be performed. + :keyword str agent_object_id: + The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to + perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner + of the user delegation key has the required permissions before granting access and the service will perform an + additional POSIX ACL check to determine if this user is authorized to perform the requested operation. + :keyword str correlation_id: + The correlation id to correlate the storage audit logs with the audit logs used by the principal + generating and distributing the SAS. + :return: A Shared Access Signature (sas) token. 
+    :rtype: str
+    """
+    return generate_container_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        **kwargs)
+
+
+def generate_directory_sas(
+        account_name, # type: str
+        file_system_name, # type: str
+        directory_name, # type: str
+        credential, # type: Union[str, UserDelegationKey]
+        permission=None, # type: Optional[Union[FileSasPermissions, str]]
+        expiry=None, # type: Optional[Union[datetime, str]]
+        **kwargs # type: Any
+    ):
+    # type: (...) -> str
+    """Generates a shared access signature for a directory.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str file_system_name:
+        The name of the file system.
+    :param str directory_name:
+        The name of the directory.
+    :param credential:
+        The credential can be either an account key or a user delegation key.
+        If an account key is used as the credential, the credential type should be a str.
+        Instead of an account key, the user could also pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished
+        by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+        When present, the SAS is signed with the user delegation key instead.
+    :type credential: str or ~azure.storage.filedatalake.UserDelegationKey
+    :param permission:
+        The permissions associated with the shared access signature. The
+        user is restricted to operations allowed by the permissions.
+        Permissions must be ordered read, write, delete, list.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has been
+        specified in an associated stored access policy.
+    :type permission: str or ~azure.storage.filedatalake.FileSasPermissions
+    :param expiry:
+        The time at which the shared access signature becomes invalid.
+        Required unless an id is given referencing a stored access policy
+        which contains this field. This field must be omitted if it has
+        been specified in an associated stored access policy. Azure will always
+        convert values to UTC. If a date is passed in without timezone info, it
+        is assumed to be UTC.
+    :type expiry: ~datetime.datetime or str
+    :keyword start:
+        The time at which the shared access signature becomes valid. If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. Azure will always convert values
+        to UTC. If a date is passed in without timezone info, it is assumed to
+        be UTC.
+    :paramtype start: ~datetime.datetime or str
+    :keyword str ip:
+        Specifies an IP address or a range of IP addresses from which to accept requests.
+        If the IP address from which the request originates does not match the IP address
+        or address range specified on the SAS token, the request is not authenticated.
+        For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
+        restricts the request to those IP addresses.
+    :keyword str protocol:
+        Specifies the protocol permitted for a request made. The default value is https.
+    :keyword str cache_control:
+        Response header value for Cache-Control when resource is accessed
+        using this shared access signature.
+    :keyword str content_disposition:
+        Response header value for Content-Disposition when resource is accessed
+        using this shared access signature.
+    :keyword str content_encoding:
+        Response header value for Content-Encoding when resource is accessed
+        using this shared access signature.
+    :keyword str content_language:
+        Response header value for Content-Language when resource is accessed
+        using this shared access signature.
+    :keyword str content_type:
+        Response header value for Content-Type when resource is accessed
+        using this shared access signature.
+    :keyword str preauthorized_agent_object_id:
+        The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
+        the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
+        user delegation key has the required permissions before granting access but no additional permission check for
+        the agent object id will be performed.
+    :keyword str agent_object_id:
+        The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
+        perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
+        of the user delegation key has the required permissions before granting access and the service will perform an
+        additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
+    :keyword str correlation_id:
+        The correlation id to correlate the storage audit logs with the audit logs used by the principal
+        generating and distributing the SAS.
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    depth = len(directory_name.strip("/").split("/"))
+    return generate_blob_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        blob_name=directory_name,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        sdd=depth,
+        is_directory=True,
+        **kwargs)
+
+
+def generate_file_sas(
+        account_name, # type: str
+        file_system_name, # type: str
+        directory_name, # type: str
+        file_name, # type: str
+        credential, # type: Union[str, UserDelegationKey]
+        permission=None, # type: Optional[Union[FileSasPermissions, str]]
+        expiry=None, # type: Optional[Union[datetime, str]]
+        **kwargs # type: Any
+    ):
+    # type: (...) -> str
+    """Generates a shared access signature for a file.
+
+    Use the returned signature with the credential parameter of any DataLakeServiceClient,
+    FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
+
+    :param str account_name:
+        The storage account name used to generate the shared access signature.
+    :param str file_system_name:
+        The name of the file system.
+    :param str directory_name:
+        The name of the directory.
+    :param str file_name:
+        The name of the file.
+    :param credential:
+        The credential can be either an account key or a user delegation key.
+        If an account key is used as the credential, the credential type should be a str.
+        Instead of an account key, the user could also pass in a user delegation key.
+        A user delegation key can be obtained from the service by authenticating with an AAD identity;
+        this can be accomplished
+        by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
+ When present, the SAS is signed with the user delegation key instead. + :type credential: str or ~azure.storage.filedatalake.UserDelegationKey + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.filedatalake.FileSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :keyword start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :paramtype start: ~datetime.datetime or str + :keyword str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str preauthorized_agent_object_id: + The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform + the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the + user delegation key has the required permissions before granting access but no additional permission check for + the agent object id will be performed. + :keyword str agent_object_id: + The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to + perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner + of the user delegation key has the required permissions before granting access and the service will perform an + additional POSIX ACL check to determine if this user is authorized to perform the requested operation. 
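+
+    For illustration only, a minimal sketch of signing a file SAS with a user
+    delegation key (the account, file system, and path names are hypothetical
+    placeholders, and ``token_credential`` is assumed to be an azure.identity credential):
+
+    .. code-block:: python
+
+        from datetime import datetime, timedelta
+
+        service = DataLakeServiceClient(
+            "https://myaccount.dfs.core.windows.net", credential=token_credential)
+        delegation_key = service.get_user_delegation_key(
+            datetime.utcnow(), datetime.utcnow() + timedelta(hours=1))
+        sas_token = generate_file_sas(
+            "myaccount", "myfs", "mydir", "myfile.txt",
+            credential=delegation_key,
+            permission=FileSasPermissions(read=True),
+            expiry=datetime.utcnow() + timedelta(hours=1))
+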
+    :keyword str correlation_id:
+        The correlation id to correlate the storage audit logs with the audit logs used by the principal
+        generating and distributing the SAS. This can only be used when generating a SAS with a user delegation key.
+    :return: A Shared Access Signature (sas) token.
+    :rtype: str
+    """
+    if directory_name:
+        path = directory_name.rstrip('/') + "/" + file_name
+    else:
+        path = file_name
+    return generate_blob_sas(
+        account_name=account_name,
+        container_name=file_system_name,
+        blob_name=path,
+        account_key=credential if isinstance(credential, str) else None,
+        user_delegation_key=credential if not isinstance(credential, str) else None,
+        permission=permission,
+        expiry=expiry,
+        **kwargs)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_upload_helper.py
new file mode 100644
index 0000000..6d88c32
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_upload_helper.py
@@ -0,0 +1,104 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from ._deserialize import (
+    process_storage_error)
+from ._shared.response_handlers import return_response_headers
+from ._shared.uploads import (
+    upload_data_chunks,
+    DataLakeFileChunkUploader, upload_substream_blocks)
+from azure.core.exceptions import HttpResponseError
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument
+    return any([
+        modified_access_conditions.if_modified_since,
+        modified_access_conditions.if_unmodified_since,
+        modified_access_conditions.if_none_match,
+        modified_access_conditions.if_match
+    ])
+
+
+def upload_datalake_file( # pylint: disable=unused-argument
+        client=None,
+        stream=None,
+        length=None,
+        overwrite=None,
+        validate_content=None,
+        max_concurrency=None,
+        file_settings=None,
+        **kwargs):
+    try:
+        if length == 0:
+            return {}
+        properties = kwargs.pop('properties', None)
+        umask = kwargs.pop('umask', None)
+        permissions = kwargs.pop('permissions', None)
+        path_http_headers = kwargs.pop('path_http_headers', None)
+        modified_access_conditions = kwargs.pop('modified_access_conditions', None)
+        chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
+
+        if not overwrite:
+            # if customers didn't specify access conditions, they cannot flush data to existing file
+            if not _any_conditions(modified_access_conditions):
+                modified_access_conditions.if_none_match = '*'
+            if properties or umask or permissions:
+                raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
+
+        if overwrite:
+            response = client.create(
+                resource='file',
+                path_http_headers=path_http_headers,
+                properties=properties,
+                modified_access_conditions=modified_access_conditions,
+                umask=umask,
+                permissions=permissions,
+                cls=return_response_headers,
+                **kwargs)
+
+            # this modified_access_conditions will be applied to flush_data to make sure
+            # no other flush between create and the current flush
+            modified_access_conditions.if_match = response['etag']
+            modified_access_conditions.if_none_match = None
+            modified_access_conditions.if_modified_since = None
+            modified_access_conditions.if_unmodified_since = None
+
+        use_original_upload_path = 
file_settings.use_byte_buffer or \ + validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \ + hasattr(stream, 'seekable') and not stream.seekable() or \ + not hasattr(stream, 'seek') or not hasattr(stream, 'tell') + + if use_original_upload_path: + upload_data_chunks( + service=client, + uploader_class=DataLakeFileChunkUploader, + total_size=length, + chunk_size=chunk_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + **kwargs) + else: + upload_substream_blocks( + service=client, + uploader_class=DataLakeFileChunkUploader, + total_size=length, + chunk_size=chunk_size, + max_concurrency=max_concurrency, + stream=stream, + validate_content=validate_content, + **kwargs + ) + + return client.flush_data(position=length, + path_http_headers=path_http_headers, + modified_access_conditions=modified_access_conditions, + close=True, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/_version.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_version.py new file mode 100644 index 0000000..d731da5 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/_version.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +VERSION = "12.5.0" diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/__init__.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/__init__.py new file mode 100644 index 0000000..c24dde8 --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/__init__.py @@ -0,0 +1,24 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from ._download_async import StorageStreamDownloader +from .._shared.policies_async import ExponentialRetry, LinearRetry +from ._data_lake_file_client_async import DataLakeFileClient +from ._data_lake_directory_client_async import DataLakeDirectoryClient +from ._file_system_client_async import FileSystemClient +from ._data_lake_service_client_async import DataLakeServiceClient +from ._data_lake_lease_async import DataLakeLeaseClient + +__all__ = [ + 'DataLakeServiceClient', + 'FileSystemClient', + 'DataLakeDirectoryClient', + 'DataLakeFileClient', + 'DataLakeLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'StorageStreamDownloader' +] diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_directory_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_directory_client_async.py new file mode 100644 index 0000000..6254efb --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_directory_client_async.py @@ -0,0 +1,553 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +from typing import Any + +try: + from urllib.parse import quote, unquote +except ImportError: + from urllib2 import quote, unquote # type: ignore +from azure.core.pipeline import AsyncPipeline +from ._data_lake_file_client_async import DataLakeFileClient +from .._data_lake_directory_client import DataLakeDirectoryClient as DataLakeDirectoryClientBase +from .._models import DirectoryProperties, FileProperties +from .._deserialize import deserialize_dir_properties +from ._path_client_async import PathClient +from .._shared.base_client_async import AsyncTransportWrapper + + +class DataLakeDirectoryClient(PathClient, DataLakeDirectoryClientBase): + """A client to interact with the DataLake directory, even if the directory may not yet exist. + + For operations relating to a specific subdirectory or file under the directory, a directory client or file client + can be retrieved using the :func:`~get_sub_directory_client` or :func:`~get_file_client` functions. + + :ivar str url: + The full endpoint URL to the file system, including SAS token if used. + :ivar str primary_endpoint: + The full primary endpoint URL. + :ivar str primary_hostname: + The hostname of the primary endpoint. + :param str account_url: + The URI to the storage account. + :param file_system_name: + The file system for the directory or files. + :type file_system_name: str + :param directory_name: + The whole path of the directory. eg. {directory under file system}/{directory to interact with} + :type directory_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py + :start-after: [START instantiate_directory_client_from_conn_str] + :end-before: [END instantiate_directory_client_from_conn_str] + :language: python + :dedent: 4 + :caption: Creating the DataLakeServiceClient from connection string. + """ + + def __init__( + self, account_url, # type: str + file_system_name, # type: str + directory_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + super(DataLakeDirectoryClient, self).__init__(account_url, file_system_name, directory_name, # pylint: disable=specify-parameter-names-in-call + credential=credential, **kwargs) + + async def create_directory(self, metadata=None, # type: Optional[Dict[str, str]] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """ + Create a new directory. + + :param metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword lease: + Required if the directory has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. 
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: response dict (Etag and last modified).
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START create_directory]
+                :end-before: [END create_directory]
+                :language: python
+                :dedent: 8
+                :caption: Create directory.
+        """
+        return await self._create('directory', metadata=metadata, **kwargs)
+
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a directory exists and returns False otherwise.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: boolean
+        """
+        return await self._exists(**kwargs)
+
+    async def delete_directory(self, **kwargs):
+        # type: (...) -> None
+        """
+        Marks the specified directory for deletion.
+
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START delete_directory]
+                :end-before: [END delete_directory]
+                :language: python
+                :dedent: 4
+                :caption: Delete directory.
+        """
+        return await self._delete(recursive=True, **kwargs)
+
+    async def get_directory_properties(self, **kwargs):
+        # type: (**Any) -> DirectoryProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the directory. It does not return the content of the directory.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: DirectoryProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START get_directory_properties]
+                :end-before: [END get_directory_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file/directory.
+        """
+        return await self._get_path_properties(cls=deserialize_dir_properties, **kwargs)  # pylint: disable=protected-access
+
+    async def rename_directory(self, new_name,  # type: str
+                               **kwargs):
+        # type: (**Any) -> DataLakeDirectoryClient
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            The new directory name to which the user wants to rename.
+            The value must have the following format: "{filesystem}/{directory}/{subdirectory}".
+        :keyword source_lease:
+            A lease ID for the source path. If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_directory_async.py
+                :start-after: [START rename_directory]
+                :end-before: [END rename_directory]
+                :language: python
+                :dedent: 4
+                :caption: Rename the source directory.
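+
+        For illustration only, a minimal sketch (the names are hypothetical; the
+        target must include the destination file system):
+
+        .. code-block:: python
+
+            # rename "olddir" to "newdir" within the same file system "myfs"
+            renamed_client = await directory_client.rename_directory("myfs/newdir")
+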
+ """ + new_name = new_name.strip('/') + new_file_system = new_name.split('/')[0] + new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') + new_path = new_path_and_token[0] + try: + new_dir_sas = new_path_and_token[1] or self._query_str.strip('?') + except IndexError: + if not self._raw_credential and new_file_system != self.file_system_name: + raise ValueError("please provide the sas token for the new directory") + if not self._raw_credential and new_file_system == self.file_system_name: + new_dir_sas = self._query_str.strip('?') + + new_directory_client = DataLakeDirectoryClient( + "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, directory_name=new_path, + credential=self._raw_credential or new_dir_sas, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + await new_directory_client._rename_path( # pylint: disable=protected-access + '/{}/{}{}'.format(quote(unquote(self.file_system_name)), + quote(unquote(self.path_name)), + self._query_str), + **kwargs) + return new_directory_client + + async def create_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Create a subdirectory and return the subdirectory client to be interacted with. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient for the subdirectory. + """ + subdir = self.get_sub_directory_client(sub_directory) + await subdir.create_directory(metadata=metadata, **kwargs) + return subdir + + async def delete_sub_directory(self, sub_directory, # type: Union[DirectoryProperties, str] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Marks the specified subdirectory for deletion. + + :param sub_directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties + :keyword lease: + Required if the directory has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient for the subdirectory + """ + subdir = self.get_sub_directory_client(sub_directory) + await subdir.delete_directory(**kwargs) + return subdir + + async def create_file(self, file, # type: Union[FileProperties, str] + **kwargs): + # type: (...) -> DataLakeFileClient + """ + Create a new file and return the file client to be interacted with. + + :param file: + The file with which to interact. This can either be the name of the file, + or an instance of FileProperties. 
+ :type file: str or ~azure.storage.filedatalake.FileProperties + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a DataLakeLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword str umask: + Optional and only valid if Hierarchical Namespace is enabled for the account. + When creating a file or directory and the parent folder does not have a default ACL, + the umask restricts the permissions of the file or directory to be created. + The resulting permission is given by p & ^u, where p is the permission and u is the umask. + For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeFileClient + """ + file_client = self.get_file_client(file) + await file_client.create_file(**kwargs) + return file_client + + def get_file_client(self, file # type: Union[FileProperties, str] + ): + # type: (...) -> DataLakeFileClient + """Get a client to interact with the specified file. + + The file need not already exist. + + :param file: + The file with which to interact. This can either be the name of the file, + or an instance of FileProperties. eg. directory/subdirectory/file + :type file: str or ~azure.storage.filedatalake.FileProperties + :returns: A DataLakeFileClient. + :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/test_datalake_service_samples.py
+                :start-after: [START bsc_get_file_client]
+                :end-before: [END bsc_get_file_client]
+                :language: python
+                :dedent: 12
+                :caption: Getting the file client to interact with a specific file.
+        """
+        try:
+            file_path = file.get('name')
+        except AttributeError:
+            file_path = self.path_name + '/' + str(file)
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        # hand the wrapped pipeline to the new client so the parent transport is shared
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+            _location_mode=self._location_mode, require_encryption=self.require_encryption,
+            key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
+
+    def get_sub_directory_client(self, sub_directory  # type: Union[DirectoryProperties, str]
+                                 ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified subdirectory of the current directory.
+
+        The subdirectory need not already exist.
+
+        :param sub_directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type sub_directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/test_datalake_service_samples.py
+                :start-after: [START bsc_get_directory_client]
+                :end-before: [END bsc_get_directory_client]
+                :language: python
+                :dedent: 12
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            subdir_path = sub_directory.get('name')
+        except AttributeError:
+            subdir_path = self.path_name + '/' + str(sub_directory)
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies # pylint: disable = protected-access
+        )
+        # hand the wrapped pipeline to the new client so the parent transport is shared
+        return DataLakeDirectoryClient(
+            self.url, self.file_system_name, directory_name=subdir_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+            _location_mode=self._location_mode, require_encryption=self.require_encryption,
+            key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_file_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_file_client_async.py
new file mode 100644
index 0000000..df25ecf
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_file_client_async.py
@@ -0,0 +1,574 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
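+
+# For illustration only, a minimal usage sketch for the client defined in this
+# module (account_url, "myfs", and the key credential are hypothetical placeholders):
+#
+#     file_client = DataLakeFileClient(account_url, "myfs", "dir/file.txt", credential=key)
+#     await file_client.create_file()
+#     await file_client.append_data(b"abc", offset=0, length=3)
+#     await file_client.flush_data(3)
+#     data = await (await file_client.download_file()).readall()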
+# pylint: disable=invalid-overridden-method
+from typing import Any
+try:
+    from urllib.parse import quote, unquote
+except ImportError:
+    from urllib2 import quote, unquote  # type: ignore
+
+from azure.core.exceptions import HttpResponseError
+from ._download_async import StorageStreamDownloader
+from ._path_client_async import PathClient
+from .._data_lake_file_client import DataLakeFileClient as DataLakeFileClientBase
+from .._serialize import convert_datetime_to_rfc1123
+from .._deserialize import process_storage_error, deserialize_file_properties
+from .._models import FileProperties
+from ..aio._upload_helper import upload_datalake_file
+
+
+class DataLakeFileClient(PathClient, DataLakeFileClientBase):
+    """A client to interact with the DataLake file, even if the file may not yet exist.
+
+    :ivar str url:
+        The full endpoint URL to the file system, including SAS token if used.
+    :ivar str primary_endpoint:
+        The full primary endpoint URL.
+    :ivar str primary_hostname:
+        The hostname of the primary endpoint.
+    :param str account_url:
+        The URI to the storage account.
+    :param file_system_name:
+        The file system for the directory or files.
+    :type file_system_name: str
+    :param file_path:
+        The whole file path, used to interact with a specific file.
+        e.g. "{directory}/{subdirectory}/{file}"
+    :type file_path: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential from azure.core.credentials, an account
+        shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/datalake_samples_instantiate_client_async.py
+            :start-after: [START instantiate_file_client_from_conn_str]
+            :end-before: [END instantiate_file_client_from_conn_str]
+            :language: python
+            :dedent: 4
+            :caption: Creating the DataLakeFileClient from connection string.
+    """
+
+    def __init__(
+            self, account_url,  # type: str
+            file_system_name,  # type: str
+            file_path,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> None
+        super(DataLakeFileClient, self).__init__(account_url, file_system_name, path_name=file_path,
+                                                 credential=credential, **kwargs)
+
+    async def create_file(self, content_settings=None,  # type: Optional[ContentSettings]
+                          metadata=None,  # type: Optional[Dict[str, str]]
+                          **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create a new file.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: response dict (Etag and last modified).
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START create_file]
+                :end-before: [END create_file]
+                :language: python
+                :dedent: 4
+                :caption: Create file.
+        """
+        return await self._create('file', content_settings=content_settings, metadata=metadata, **kwargs)
+
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a file exists and returns False otherwise.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: boolean
+        """
+        return await self._exists(**kwargs)
+
+    async def delete_file(self, **kwargs):
+        # type: (...) -> None
+        """
+        Marks the specified file for deletion.
+
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START delete_file]
+                :end-before: [END delete_file]
+                :language: python
+                :dedent: 4
+                :caption: Delete file.
+        """
+        return await self._delete(**kwargs)
+
+    async def get_file_properties(self, **kwargs):
+        # type: (**Any) -> FileProperties
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file. It does not return the content of the file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: FileProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+                :start-after: [START get_file_properties]
+                :end-before: [END get_file_properties]
+                :language: python
+                :dedent: 4
+                :caption: Getting the properties for a file.
+        """
+        return await self._get_path_properties(cls=deserialize_file_properties, **kwargs)  # pylint: disable=protected-access
+
+    async def set_file_expiry(self, expiry_options,  # type: str
+                              expires_on=None,  # type: Optional[Union[datetime, int]]
+                              **kwargs):
+        # type: (str, Optional[Union[datetime, int]], **Any) -> None
+        """Sets the time a file will expire and be deleted.
+
+        :param str expiry_options:
+            Required. Indicates mode of the expiry time.
+            Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute'
+        :param datetime or int expires_on:
+            The time at which the file expires.
+            When expiry_options is RelativeTo*, expires_on should be an int in milliseconds
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+        """
+        try:
+            expires_on = convert_datetime_to_rfc1123(expires_on)
+        except AttributeError:
+            expires_on = str(expires_on)
+        await self._datalake_client_for_blob_operation.path.set_expiry(expiry_options, expires_on=expires_on,
+                                                                       **kwargs)  # pylint: disable=protected-access
+
+    async def upload_data(self, data,  # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]]
+                          length=None,  # type: Optional[int]
+                          overwrite=False,  # type: Optional[bool]
+                          **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """
+        Upload data to a file.
+
+        :param data: Content to be uploaded to the file.
+        :param int length: Size of the data in bytes.
+        :param bool overwrite: Whether to overwrite an existing file.
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the blob as metadata.
+        :paramtype metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.DataLakeLeaseClient or str lease:
+            Required if the blob has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :keyword str umask: Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions: Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the blob.
Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword int chunk_size: + The maximum chunk size for uploading a file in chunks. + Defaults to 100*1024*1024, or 100MB. + :return: response dict (Etag and last modified). + """ + options = self._upload_options( + data, + length=length, + overwrite=overwrite, + **kwargs) + return await upload_datalake_file(**options) + + async def append_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] + offset, # type: int + length=None, # type: Optional[int] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """Append data to the file. + + :param data: Content to be appended to file + :param offset: start position of the data to be appended to. + :param length: Size of the data in bytes. + :keyword bool validate_content: + If true, calculates an MD5 hash of the block content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. Note that this MD5 hash is not stored with the + file. + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :return: dict of the response header + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START append_data] + :end-before: [END append_data] + :language: python + :dedent: 4 + :caption: Append data to the file. + """ + options = self._append_data_options( + data, + offset, + length=length, + **kwargs) + try: + return await self._client.path.append_data(**options) + except HttpResponseError as error: + process_storage_error(error) + + async def flush_data(self, offset, # type: int + retain_uncommitted_data=False, # type: Optional[bool] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """ Commit the previous appended data. + + :param offset: offset is equal to the length of the file after commit the + previous appended data. + :param bool retain_uncommitted_data: Valid only for flush operations. If + "true", uncommitted data is retained after the flush operation + completes; otherwise, the uncommitted data is deleted after the flush + operation. The default is false. Data at offsets less than the + specified position are written to the file when flush succeeds, but + this optional parameter allows data after the flush position to be + retained for a future flush operation. + :keyword ~azure.storage.filedatalake.ContentSettings content_settings: + ContentSettings object used to set path properties. + :keyword bool close: Azure Storage Events allow applications to receive + notifications when files change. When Azure Storage Events are + enabled, a file changed event is raised. 
This event has a property + indicating whether this is the final change to distinguish the + difference between an intermediate flush to a file stream and the + final close of a file stream. The close query parameter is valid only + when the action is "flush" and change notifications are enabled. If + the value of close is "true" and the flush operation completes + successfully, the service raises a file change notification with a + property indicating that this is the final update (the file stream has + been closed). If "false" a change notification is raised indicating + the file has changed. The default is false. This query parameter is + set to true by the Hadoop ABFS driver to indicate that the file stream + has been closed." + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :return: response header in dict + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START upload_file_to_file_system] + :end-before: [END upload_file_to_file_system] + :language: python + :dedent: 12 + :caption: Commit the previous appended data. + """ + options = self._flush_data_options( + offset, + retain_uncommitted_data=retain_uncommitted_data, **kwargs) + try: + return await self._client.path.flush_data(**options) + except HttpResponseError as error: + process_storage_error(error) + + async def download_file(self, offset=None, length=None, **kwargs): + # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader + """Downloads a file to the StorageStreamDownloader. The readall() method must + be used to read all the content, or readinto() must be used to download the file into + a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. + + :param int offset: + Start of byte range to use for downloading a section of the file. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword lease: + If specified, download only succeeds if the file's lease is active + and matches this ID. Required if the file has an active lease. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds. This method may make
+ multiple calls to the Azure service and the timeout will apply to
+ each call individually.
+ :returns: A streaming object (StorageStreamDownloader)
+ :rtype: ~azure.storage.filedatalake.aio.StorageStreamDownloader
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/datalake_samples_upload_download_async.py
+ :start-after: [START read_file]
+ :end-before: [END read_file]
+ :language: python
+ :dedent: 4
+ :caption: Return the downloaded data.
+ """
+ downloader = await self._blob_client.download_blob(offset=offset, length=length, **kwargs)
+ return StorageStreamDownloader(downloader)
+
+ async def rename_file(self, new_name, **kwargs):
+ # type: (str, **Any) -> DataLakeFileClient
+ """
+ Rename the source file.
+
+ :param str new_name: The new file name the user wants to rename to.
+ The value must have the following format: "{filesystem}/{directory}/{subdirectory}/{file}".
+ :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+ ContentSettings object used to set path properties.
+ :keyword source_lease: A lease ID for the source path. If specified,
+ the source path must have an active lease and the lease ID must
+ match.
+ :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword lease:
+ Required if the file/directory has an active lease. Value can be a LeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: the renamed file client + :rtype: DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_upload_download_async.py + :start-after: [START rename_file] + :end-before: [END rename_file] + :language: python + :dedent: 4 + :caption: Rename the source file. + """ + new_name = new_name.strip('/') + new_file_system = new_name.split('/')[0] + new_path_and_token = new_name[len(new_file_system):].strip('/').split('?') + new_path = new_path_and_token[0] + try: + new_file_sas = new_path_and_token[1] or self._query_str.strip('?') + except IndexError: + if not self._raw_credential and new_file_system != self.file_system_name: + raise ValueError("please provide the sas token for the new file") + if not self._raw_credential and new_file_system == self.file_system_name: + new_file_sas = self._query_str.strip('?') + + new_file_client = DataLakeFileClient( + "{}://{}".format(self.scheme, self.primary_hostname), new_file_system, file_path=new_path, + credential=self._raw_credential or new_file_sas, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + await new_file_client._rename_path( # pylint: disable=protected-access + '/{}/{}{}'.format(quote(unquote(self.file_system_name)), + quote(unquote(self.path_name)), + self._query_str), + **kwargs) + return new_file_client diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_lease_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_lease_async.py new file mode 100644 index 0000000..a5e4ccc --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_lease_async.py @@ -0,0 +1,243 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any,
+ TypeVar, TYPE_CHECKING
+)
+from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobLeaseClient
+from .._data_lake_lease import DataLakeLeaseClient as DataLakeLeaseClientBase
+
+
+if TYPE_CHECKING:
+ FileSystemClient = TypeVar("FileSystemClient")
+ DataLakeDirectoryClient = TypeVar("DataLakeDirectoryClient")
+ DataLakeFileClient = TypeVar("DataLakeFileClient")
+
+
+class DataLakeLeaseClient(DataLakeLeaseClientBase):
+ """Creates a new DataLakeLeaseClient.
+
+ This client provides lease operations on a FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.
+
+ :ivar str id:
+ The ID of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired.
+ :ivar str etag:
+ The ETag of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired or modified.
+ :ivar ~datetime.datetime last_modified:
+ The last modified timestamp of the lease currently being maintained.
+ This will be `None` if no lease has yet been acquired or modified.
+
+ :param client:
+ The client of the file system, directory, or file to lease.
+ :type client: ~azure.storage.filedatalake.aio.FileSystemClient or
+ ~azure.storage.filedatalake.aio.DataLakeDirectoryClient or ~azure.storage.filedatalake.aio.DataLakeFileClient
+ :param str lease_id:
+ A string representing the lease ID of an existing lease. This value does not
+ need to be specified in order to acquire a new lease, or break one.
+ """
+ def __init__(
+ self, client, lease_id=None
+ ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+ # type: (Union[FileSystemClient, DataLakeDirectoryClient, DataLakeFileClient], Optional[str]) -> None
+ super(DataLakeLeaseClient, self).__init__(client, lease_id)
+
+ if hasattr(client, '_blob_client'):
+ _client = client._blob_client # type: ignore # pylint: disable=protected-access
+ elif hasattr(client, '_container_client'):
+ _client = client._container_client # type: ignore # pylint: disable=protected-access
+ else:
+ raise TypeError("Lease must use any of FileSystemClient, DataLakeDirectoryClient, or DataLakeFileClient.")
+
+ self._blob_lease_client = BlobLeaseClient(_client, lease_id=lease_id)
+
+ def __enter__(self):
+ raise TypeError("Async lease must use 'async with'.")
+
+ def __exit__(self, *args):
+ self.release()
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, *args):
+ await self.release()
+
+ async def acquire(self, lease_duration=-1, **kwargs):
+ # type: (int, **Any) -> None
+ """Requests a new lease.
+
+ If the file/file system does not have an active lease, the DataLake service creates a
+ lease on the file/file system and returns a new lease ID.
+
+ :param int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. A non-infinite lease can be
+ between 15 and 60 seconds. A lease duration cannot be changed
+ using renew or change. Default is -1 (infinite lease).
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + await self._blob_lease_client.acquire(lease_duration=lease_duration, **kwargs) + self._update_lease_client_attributes() + + async def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the file system or file. Note that + the lease may be renewed even if it has expired as long as the file system + or file has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + await self._blob_lease_client.renew(**kwargs) + self._update_lease_client_attributes() + + async def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the file system or file. Releasing the lease allows another client + to immediately acquire the lease for the file system or file as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + await self._blob_lease_client.release(**kwargs) + self._update_lease_client_attributes() + + async def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + await self._blob_lease_client.change(proposed_lease_id=proposed_lease_id, **kwargs) + self._update_lease_client_attributes() + + async def break_lease(self, lease_break_period=None, **kwargs): + # type: (Optional[int], Any) -> int + """Break the lease, if the file system or file has an active lease. + + Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. When a lease + is broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the file system or file. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :param int lease_break_period: + This is the proposed duration of seconds that the lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the lease. If longer, the time remaining on the lease is used. + A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break + period. 
If this header does not appear with a break
+ operation, a fixed-duration lease breaks after the remaining lease
+ period elapses, and an infinite lease breaks immediately.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Approximate time remaining in the lease period, in seconds.
+ :rtype: int
+ """
+ return await self._blob_lease_client.break_lease(lease_break_period=lease_break_period, **kwargs)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_service_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_service_client_async.py
new file mode 100644
index 0000000..905cd3b
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_data_lake_service_client_async.py
@@ -0,0 +1,507 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+from typing import Optional, Any, Dict
+
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import AsyncPipeline
+
+from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobServiceClient
+from .._serialize import get_api_version
+from .._generated.aio import AzureDataLakeStorageRESTAPI
+from .._deserialize import get_datalake_service_properties
+from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin
+from ._file_system_client_async import FileSystemClient
+from .._data_lake_service_client import DataLakeServiceClient as DataLakeServiceClientBase
+from .._shared.policies_async import ExponentialRetry
+from ._data_lake_directory_client_async import DataLakeDirectoryClient
+from ._data_lake_file_client_async import DataLakeFileClient
+from ._models import FileSystemPropertiesPaged
+from .._models import UserDelegationKey, LocationMode
+
+
+class DataLakeServiceClient(AsyncStorageAccountHostsMixin, DataLakeServiceClientBase):
+ """A client to interact with the DataLake Service at the account level.
+
+ This client provides operations to retrieve and configure the account properties
+ as well as list, create and delete file systems within the account.
+ For operations relating to a specific file system, directory or file, clients for those entities
+ can also be retrieved using the `get_client` functions.
+
+ :ivar str url:
+ The full endpoint URL to the datalake service endpoint.
+ :ivar str primary_endpoint:
+ The full primary endpoint URL.
+ :ivar str primary_hostname:
+ The hostname of the primary endpoint.
+ :param str account_url: + The URL to the DataLake storage account. Any other entities included + in the URL path (e.g. file system or file) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START create_datalake_service_client] + :end-before: [END create_datalake_service_client] + :language: python + :dedent: 4 + :caption: Creating the DataLakeServiceClient from connection string. + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START create_datalake_service_client_oauth] + :end-before: [END create_datalake_service_client_oauth] + :language: python + :dedent: 4 + :caption: Creating the DataLakeServiceClient with Azure Identity credentials. + """ + + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(DataLakeServiceClient, self).__init__( + account_url, + credential=credential, + **kwargs + ) + self._blob_service_client = BlobServiceClient(self._blob_account_url, credential, **kwargs) + self._blob_service_client._hosts[LocationMode.SECONDARY] = "" #pylint: disable=protected-access + self._client = AzureDataLakeStorageRESTAPI(self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs) #pylint: disable=protected-access + self._loop = kwargs.get('loop', None) + + async def __aenter__(self): + await self._blob_service_client.__aenter__() + return self + + async def __aexit__(self, *args): + await self._blob_service_client.close() + + async def close(self): + # type: () -> None + """ This method is to close the sockets opened by the client. + It need not be used when using with a context manager. + """ + await self._blob_service_client.close() + + async def get_user_delegation_key(self, key_start_time, # type: datetime + key_expiry_time, # type: datetime + **kwargs # type: Any + ): + # type: (...) -> UserDelegationKey + """ + Obtain a user delegation key for the purpose of signing SAS tokens. + A token credential must be present on the service object for this request to succeed. + + :param ~datetime.datetime key_start_time: + A DateTime value. Indicates when the key becomes valid. + :param ~datetime.datetime key_expiry_time: + A DateTime value. Indicates when the key stops being valid. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: The user delegation key. + :rtype: ~azure.storage.filedatalake.UserDelegationKey + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START get_user_delegation_key] + :end-before: [END get_user_delegation_key] + :language: python + :dedent: 8 + :caption: Get user delegation key from datalake service client. 
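+
+ A minimal inline sketch (hedged; not taken from the sample files). It assumes an
+ ``azure.identity`` async credential and that this package's aio namespace re-exports
+ ``DataLakeServiceClient``, as the upstream ``azure.storage.filedatalake.aio`` does;
+ "myaccount" is a placeholder account name::
+
+     from datetime import datetime, timedelta
+     from azure.identity.aio import DefaultAzureCredential
+
+     async with DataLakeServiceClient(
+             "https://myaccount.dfs.core.windows.net",
+             credential=DefaultAzureCredential()) as service:
+         key = await service.get_user_delegation_key(
+             datetime.utcnow(), datetime.utcnow() + timedelta(hours=1))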
+ """ + delegation_key = await self._blob_service_client.get_user_delegation_key( + key_start_time=key_start_time, + key_expiry_time=key_expiry_time, + **kwargs) # pylint: disable=protected-access + return UserDelegationKey._from_generated(delegation_key) # pylint: disable=protected-access + + def list_file_systems(self, name_starts_with=None, # type: Optional[str] + include_metadata=None, # type: Optional[bool] + **kwargs): + # type: (...) -> ItemPaged[FileSystemProperties] + """Returns a generator to list the file systems under the specified account. + + The generator will lazily follow the continuation tokens returned by + the service and stop when all file systems have been returned. + + :param str name_starts_with: + Filters the results to return only file systems whose names + begin with the specified prefix. + :param bool include_metadata: + Specifies that file system metadata be returned in the response. + The default value is `False`. + :keyword int results_per_page: + The maximum number of file system names to retrieve per API + call. If the request does not specify the server will return up to 5,000 items per page. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword bool include_deleted: + Specifies that deleted file systems to be returned in the response. This is for file system restore enabled + account. The default value is `False`. + .. versionadded:: 12.3.0 + :returns: An iterable (auto-paging) of FileSystemProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.filedatalake.FileSystemProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START list_file_systems] + :end-before: [END list_file_systems] + :language: python + :dedent: 8 + :caption: Listing the file systems in the datalake service. + """ + item_paged = self._blob_service_client.list_containers(name_starts_with=name_starts_with, + include_metadata=include_metadata, + **kwargs) # pylint: disable=protected-access + item_paged._page_iterator_class = FileSystemPropertiesPaged # pylint: disable=protected-access + return item_paged + + async def create_file_system(self, file_system, # type: Union[FileSystemProperties, str] + metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[PublicAccess] + **kwargs): + # type: (...) -> FileSystemClient + """Creates a new file system under the specified account. + + If the file system with the same name already exists, a ResourceExistsError will + be raised. This method returns a client with which to interact with the newly + created file system. + + :param str file_system: + The name of the file system to create. + :param metadata: + A dict with name-value pairs to associate with the + file system as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + Possible values include: file system, file. + :type public_access: ~azure.storage.filedatalake.PublicAccess + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.filedatalake.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START create_file_system_from_service_client] + :end-before: [END create_file_system_from_service_client] + :language: python + :dedent: 8 + :caption: Creating a file system in the datalake service. 
+ """ + file_system_client = self.get_file_system_client(file_system) + await file_system_client.create_file_system(metadata=metadata, public_access=public_access, **kwargs) + return file_system_client + + async def _rename_file_system(self, name, new_name, **kwargs): + # type: (str, str, **Any) -> FileSystemClient + """Renames a filesystem. + + Operation is successful only if the source filesystem exists. + + :param str name: + The name of the filesystem to rename. + :param str new_name: + The new filesystem name the user wants to rename to. + :keyword lease: + Specify this to perform only if the lease ID given + matches the active lease ID of the source filesystem. + :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.filedatalake.FileSystemClient + """ + await self._blob_service_client._rename_container(name, new_name, **kwargs) # pylint: disable=protected-access + renamed_file_system = self.get_file_system_client(new_name) + return renamed_file_system + + async def undelete_file_system(self, name, deleted_version, **kwargs): + # type: (str, str, **Any) -> FileSystemClient + """Restores soft-deleted filesystem. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + .. versionadded:: 12.3.0 + This operation was introduced in API version '2019-12-12'. + + :param str name: + Specifies the name of the deleted filesystem to restore. + :param str deleted_version: + Specifies the version of the deleted filesystem to restore. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.filedatalake.FileSystemClient + """ + new_name = kwargs.pop('new_name', None) + await self._blob_service_client.undelete_container(name, deleted_version, new_name=new_name, **kwargs) # pylint: disable=protected-access + file_system = self.get_file_system_client(new_name or name) + return file_system + + async def delete_file_system(self, file_system, # type: Union[FileSystemProperties, str] + **kwargs): + # type: (...) -> FileSystemClient + """Marks the specified file system for deletion. + + The file system and any files contained within it are later deleted during garbage collection. + If the file system is not found, a ResourceNotFoundError will be raised. + + :param file_system: + The file system to delete. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :keyword lease: + If specified, delete_file_system only succeeds if the + file system's lease is active and matches this ID. + Required if the file system has an active lease. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START delete_file_system_from_service_client] + :end-before: [END delete_file_system_from_service_client] + :language: python + :dedent: 8 + :caption: Deleting a file system in the datalake service. + """ + file_system_client = self.get_file_system_client(file_system) + await file_system_client.delete_file_system(**kwargs) + return file_system_client + + def get_file_system_client(self, file_system # type: Union[FileSystemProperties, str] + ): + # type: (...) -> FileSystemClient + """Get a client to interact with the specified file system. + + The file system need not already exist. + + :param file_system: + The file system. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :returns: A FileSystemClient. + :rtype: ~azure.storage.filedatalake.aio.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_file_system_client_from_service] + :end-before: [END create_file_system_client_from_service] + :language: python + :dedent: 8 + :caption: Getting the file system client to interact with a specific file system. + """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return FileSystemClient(self.url, file_system_name, credential=self._raw_credential, + api_version=self.api_version, + _configuration=self._config, + _pipeline=self._pipeline, _hosts=self._hosts, + require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + + def get_directory_client(self, file_system, # type: Union[FileSystemProperties, str] + directory # type: Union[DirectoryProperties, str] + ): + # type: (...) -> DataLakeDirectoryClient + """Get a client to interact with the specified directory. + + The directory need not already exist. + + :param file_system: + The file system that the directory is in. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :param directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :returns: A DataLakeDirectoryClient. + :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START get_directory_client_from_service_client] + :end-before: [END get_directory_client_from_service_client] + :language: python + :dedent: 8 + :caption: Getting the directory client to interact with a specific directory. + """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + try: + directory_name = directory.name + except AttributeError: + directory_name = directory + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return DataLakeDirectoryClient(self.url, file_system_name, directory_name=directory_name, + credential=self._raw_credential, + api_version=self.api_version, + _configuration=self._config, _pipeline=self._pipeline, + _hosts=self._hosts, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function + ) + + def get_file_client(self, file_system, # type: Union[FileSystemProperties, str] + file_path # type: Union[FileProperties, str] + ): + # type: (...) -> DataLakeFileClient + """Get a client to interact with the specified file. + + The file need not already exist. + + :param file_system: + The file system that the file is in. This can either be the name of the file system, + or an instance of FileSystemProperties. + :type file_system: str or ~azure.storage.filedatalake.FileSystemProperties + :param file_path: + The file with which to interact. This can either be the full path of the file(from the root directory), + or an instance of FileProperties. eg. directory/subdirectory/file + :type file_path: str or ~azure.storage.filedatalake.FileProperties + :returns: A DataLakeFileClient. + :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_service_async.py + :start-after: [START get_file_client_from_service_client] + :end-before: [END get_file_client_from_service_client] + :language: python + :dedent: 8 + :caption: Getting the file client to interact with a specific file. + """ + try: + file_system_name = file_system.name + except AttributeError: + file_system_name = file_system + try: + file_path = file_path.name + except AttributeError: + pass + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return DataLakeFileClient( + self.url, file_system_name, file_path=file_path, credential=self._raw_credential, + api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + require_encryption=self.require_encryption, + key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function) + + async def set_service_properties(self, **kwargs): + # type: (**Any) -> None + """Sets the properties of a storage account's Datalake service, including + Azure Storage Analytics. + + If an element (e.g. analytics_logging) is left as None, the + existing settings on the service for that functionality are preserved. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2020-06-12'. + + :keyword analytics_logging: + Groups the Azure Analytics Logging settings. 
+ :type analytics_logging: ~azure.storage.filedatalake.AnalyticsLogging
+ :keyword hour_metrics:
+ The hour metrics settings provide a summary of request
+ statistics grouped by API in hourly aggregates.
+ :type hour_metrics: ~azure.storage.filedatalake.Metrics
+ :keyword minute_metrics:
+ The minute metrics settings provide request statistics
+ for each minute.
+ :type minute_metrics: ~azure.storage.filedatalake.Metrics
+ :keyword cors:
+ You can include up to five CorsRule elements in the
+ list. If an empty list is specified, all CORS rules will be deleted,
+ and CORS will be disabled for the service.
+ :type cors: list[~azure.storage.filedatalake.CorsRule]
+ :keyword str target_version:
+ Indicates the default version to use for requests if an incoming
+ request's version is not specified.
+ :keyword delete_retention_policy:
+ The delete retention policy specifies whether to retain deleted files/directories.
+ It also specifies the number of days and versions of file/directory to keep.
+ :type delete_retention_policy: ~azure.storage.filedatalake.RetentionPolicy
+ :keyword static_website:
+ Specifies whether the static website feature is enabled,
+ and if yes, indicates the index document and 404 error document to use.
+ :type static_website: ~azure.storage.filedatalake.StaticWebsite
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ return await self._blob_service_client.set_service_properties(**kwargs) # pylint: disable=protected-access
+
+ async def get_service_properties(self, **kwargs):
+ # type: (**Any) -> Dict[str, Any]
+ """Gets the properties of a storage account's datalake service, including
+ Azure Storage Analytics.
+
+ .. versionadded:: 12.4.0
+ This operation was introduced in API version '2020-06-12'.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An object containing datalake service properties such as
+ analytics logging, hour/minute metrics, cors rules, etc.
+ :rtype: Dict[str, Any]
+ """
+ props = await self._blob_service_client.get_service_properties(**kwargs) # pylint: disable=protected-access
+ return get_datalake_service_properties(props)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_download_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_download_async.py
new file mode 100644
index 0000000..5685478
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_download_async.py
@@ -0,0 +1,59 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import AsyncIterator
+
+from .._deserialize import from_blob_properties
+
+
+class StorageStreamDownloader(object):
+ """A streaming object to download from Azure Storage.
+
+ :ivar str name:
+ The name of the file being downloaded.
+ :ivar ~azure.storage.filedatalake.FileProperties properties:
+ The properties of the file being downloaded. If only a range of the data is being
+ downloaded, this will be reflected in the properties.
+ :ivar int size:
+ The size of the total data in the stream. This will be the byte range if specified,
+ otherwise the total size of the file.
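+
+ A hedged usage sketch (``file_client`` is a hypothetical, already-constructed
+ ``DataLakeFileClient``)::
+
+     downloader = await file_client.download_file(offset=0, length=1024)
+     data = await downloader.readall()
+     # Or iterate without buffering the whole file:
+     async for chunk in downloader.chunks():
+         print(len(chunk))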
+ """ + + def __init__(self, downloader): + self._downloader = downloader + self.name = self._downloader.name + self.properties = from_blob_properties(self._downloader.properties) # pylint: disable=protected-access + self.size = self._downloader.size + + def __len__(self): + return self.size + + def chunks(self): + # type: () -> AsyncIterator[bytes] + """Iterate over chunks in the download stream. + + :rtype: AsyncIterator[bytes] + """ + return self._downloader.chunks() + + async def readall(self): + """Download the contents of this file. + + This operation is blocking until all data is downloaded. + :rtype: bytes or str + """ + return await self._downloader.readall() + + async def readinto(self, stream): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + return await self._downloader.readinto(stream) diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_file_system_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_file_system_client_async.py new file mode 100644 index 0000000..b78bf6a --- /dev/null +++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_file_system_client_async.py @@ -0,0 +1,874 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Dict, TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator import distributed_trace + +from azure.core.pipeline import AsyncPipeline +from azure.core.async_paging import AsyncItemPaged + +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.multiapi.storagev2.blob.v2020_06_12.aio import ContainerClient +from .._serialize import get_api_version +from .._deserialize import process_storage_error, is_file_path +from .._generated.models import ListBlobsIncludeItem + +from ._data_lake_file_client_async import DataLakeFileClient +from ._data_lake_directory_client_async import DataLakeDirectoryClient +from ._data_lake_lease_async import DataLakeLeaseClient +from .._deserialize import deserialize_path_properties +from .._file_system_client import FileSystemClient as FileSystemClientBase +from .._generated.aio import AzureDataLakeStorageRESTAPI +from .._shared.base_client_async import AsyncTransportWrapper, AsyncStorageAccountHostsMixin +from .._shared.policies_async import ExponentialRetry +from .._models import FileSystemProperties, PublicAccess, DirectoryProperties, FileProperties, DeletedPathProperties +from ._list_paths_helper import DeletedPathPropertiesPaged + + +if TYPE_CHECKING: + from datetime import datetime + from .._models import ( # pylint: disable=unused-import + ContentSettings) + + +class FileSystemClient(AsyncStorageAccountHostsMixin, FileSystemClientBase): + """A client to interact with a specific file system, even if that file system + may not yet exist. 
+ + For operations relating to a specific directory or file within this file system, a directory client or file client + can be retrieved using the :func:`~get_directory_client` or :func:`~get_file_client` functions. + + :ivar str url: + The full endpoint URL to the file system, including SAS token if used. + :ivar str primary_endpoint: + The full primary endpoint URL. + :ivar str primary_hostname: + The hostname of the primary endpoint. + :param str account_url: + The URI to the storage account. + :param file_system_name: + The file system for the directory or files. + :type file_system_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials, an account + shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_file_system_client_from_service] + :end-before: [END create_file_system_client_from_service] + :language: python + :dedent: 8 + :caption: Get a FileSystemClient from an existing DataLakeServiceClient. + """ + + def __init__( + self, account_url, # type: str + file_system_name, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(FileSystemClient, self).__init__( + account_url, + file_system_name=file_system_name, + credential=credential, + **kwargs) + # to override the class field _container_client sync version + kwargs.pop('_hosts', None) + self._container_client = ContainerClient(self._blob_account_url, file_system_name, + credential=credential, + _hosts=self._container_client._hosts,# pylint: disable=protected-access + **kwargs) # type: ignore # pylint: disable=protected-access + self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, pipeline=self._pipeline) + self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._container_client.url, + file_system=file_system_name, + pipeline=self._pipeline) + api_version = get_api_version(kwargs) + self._client._config.version = api_version # pylint: disable=protected-access + self._datalake_client_for_blob_operation._config.version = api_version # pylint: disable=protected-access + + self._loop = kwargs.get('loop', None) + + async def __aexit__(self, *args): + await self._container_client.close() + await super(FileSystemClient, self).__aexit__(*args) + + async def close(self): + # type: () -> None + """ This method is to close the sockets opened by the client. + It need not be used when using with a context manager. + """ + await self._container_client.close() + await self.__aexit__() + + @distributed_trace_async + async def acquire_lease( + self, lease_duration=-1, # type: int + lease_id=None, # type: Optional[str] + **kwargs + ): + # type: (...) -> DataLakeLeaseClient + """ + Requests a new lease. If the file system does not have an active lease, + the DataLake service creates a lease on the file system and returns a new + lease ID. 
+ + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The DataLake service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A DataLakeLeaseClient object, that can be run in a context manager. + :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START acquire_lease_on_file_system] + :end-before: [END acquire_lease_on_file_system] + :language: python + :dedent: 12 + :caption: Acquiring a lease on the file_system. + """ + lease = DataLakeLeaseClient(self, lease_id=lease_id) + await lease.acquire(lease_duration=lease_duration, **kwargs) + return lease + + @distributed_trace_async + async def create_file_system(self, metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[PublicAccess] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime]] + """Creates a new file system under the specified account. + + If the file system with the same name already exists, a ResourceExistsError will + be raised. This method returns a client with which to interact with the newly + created file system. + + :param metadata: + A dict with name-value pairs to associate with the + file system as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + To specify whether data in the file system may be accessed publicly and the level of access. + :type public_access: ~azure.storage.filedatalake.PublicAccess + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.filedatalake.aio.FileSystemClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_file_system] + :end-before: [END create_file_system] + :language: python + :dedent: 16 + :caption: Creating a file system in the datalake service. 
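+
+ A hedged inline sketch (names are illustrative; ``file_system_client`` is an
+ instance of this class)::
+
+     await file_system_client.create_file_system(metadata={"Category": "test"})
+     assert await file_system_client.exists()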
+        """
+        return await self._container_client.create_container(metadata=metadata,
+                                                             public_access=public_access,
+                                                             **kwargs)
+
+    @distributed_trace_async
+    async def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if the file system exists, False otherwise.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: True if the file system exists, False otherwise.
+        :rtype: bool
+        """
+        return await self._container_client.exists(**kwargs)
+
+    @distributed_trace_async
+    async def _rename_file_system(self, new_name, **kwargs):
+        # type: (str, **Any) -> FileSystemClient
+        """Renames a filesystem.
+
+        Operation is successful only if the source filesystem exists.
+
+        :param str new_name:
+            The new name for the filesystem.
+        :keyword lease:
+            Specify this to perform the rename only if the given lease ID
+            matches the active lease ID of the source filesystem.
+        :paramtype lease: ~azure.storage.filedatalake.DataLakeLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.filedatalake.FileSystemClient
+        """
+        await self._container_client._rename_container(new_name, **kwargs)  # pylint: disable=protected-access
+        renamed_file_system = FileSystemClient(
+            "{}://{}".format(self.scheme, self.primary_hostname), file_system_name=new_name,
+            credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function)
+        return renamed_file_system
+
+    @distributed_trace_async
+    async def delete_file_system(self, **kwargs):
+        # type: (Any) -> None
+        """Marks the specified file system for deletion.
+
+        The file system and any files contained within it are later deleted during garbage collection.
+        If the file system is not found, a ResourceNotFoundError will be raised.
+
+        :keyword lease:
+            If specified, delete_file_system only succeeds if the
+            file system's lease is active and matches this ID.
+            Required if the file system has an active lease.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START delete_file_system]
+                :end-before: [END delete_file_system]
+                :language: python
+                :dedent: 16
+                :caption: Deleting a file system in the datalake service.
+        """
+        await self._container_client.delete_container(**kwargs)
+
+    @distributed_trace_async
+    async def get_file_system_properties(self, **kwargs):
+        # type: (Any) -> FileSystemProperties
+        """Returns all user-defined metadata and system properties for the specified
+        file system. The data returned does not include the file system's list of paths.
+
+        :keyword lease:
+            If specified, get_file_system_properties only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: Properties for the specified file system within a file system object.
+        :rtype: ~azure.storage.filedatalake.FileSystemProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_file_system_properties]
+                :end-before: [END get_file_system_properties]
+                :language: python
+                :dedent: 16
+                :caption: Getting properties on the file system.
+        """
+        container_properties = await self._container_client.get_container_properties(**kwargs)
+        return FileSystemProperties._convert_from_container_props(container_properties)  # pylint: disable=protected-access
+
+    @distributed_trace_async
+    async def set_file_system_metadata(  # type: ignore
+        self, metadata,  # type: Dict[str, str]
+        **kwargs
+    ):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file system. Each call to this operation replaces all existing metadata
+        attached to the file system. To remove all metadata from the file system,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the file system as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_file_system_metadata only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: file system-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START set_file_system_metadata]
+                :end-before: [END set_file_system_metadata]
+                :language: python
+                :dedent: 16
+                :caption: Setting metadata on the file system.
+        """
+        return await self._container_client.set_container_metadata(metadata=metadata, **kwargs)
+
+    @distributed_trace_async
+    async def set_file_system_access_policy(
+            self, signed_identifiers,  # type: Dict[str, AccessPolicy]
+            public_access=None,  # type: Optional[Union[str, PublicAccess]]
+            **kwargs
+    ):  # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets the permissions for the specified file system or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether files in a file system may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the file system. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.filedatalake.AccessPolicy]
+        :param ~azure.storage.filedatalake.PublicAccess public_access:
+            To specify whether data in the file system may be accessed publicly and the level of access.
+        :keyword lease:
+            Required if the file system has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: file system-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+        """
+        return await self._container_client.set_container_access_policy(signed_identifiers,
+                                                                        public_access=public_access, **kwargs)
+
+    @distributed_trace_async
+    async def get_file_system_access_policy(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the permissions for the specified file system.
+        The permissions indicate whether file system data may be accessed publicly.
+
+        :keyword lease:
+            If specified, get_file_system_access_policy only succeeds if the
+            file system's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
+        """
+        access_policy = await self._container_client.get_container_access_policy(**kwargs)
+        return {
+            'public_access': PublicAccess._from_generated(access_policy['public_access']),  # pylint: disable=protected-access
+            'signed_identifiers': access_policy['signed_identifiers']
+        }
+
+    @distributed_trace
+    def get_paths(self, path=None,  # type: Optional[str]
+                  recursive=True,  # type: Optional[bool]
+                  max_results=None,  # type: Optional[int]
+                  **kwargs):
+        # type: (...) -> AsyncItemPaged[PathProperties]
+        """Returns a generator to list the paths (files or directories) under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str path:
+            Filters the results to return only paths under the specified path.
+        :param bool recursive:
+            If True, paths are listed recursively under the specified path. Defaults to True.
+        :param int max_results:
+            An optional value that specifies the maximum
+            number of items to return per page. If omitted or greater than 5,000, the
+            response will include up to 5,000 items per page.
+        :keyword upn:
+            Optional. Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names. If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :type upn: bool
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) response of PathProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.PathProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_paths_in_file_system]
+                :end-before: [END get_paths_in_file_system]
+                :language: python
+                :dedent: 12
+                :caption: List the paths in the file system.
+        """
+        timeout = kwargs.pop('timeout', None)
+        return self._client.file_system.list_paths(
+            recursive=recursive,
+            max_results=max_results,
+            path=path,
+            timeout=timeout,
+            cls=deserialize_path_properties,
+            **kwargs)
+
+    @distributed_trace_async
+    async def create_directory(self, directory,  # type: Union[DirectoryProperties, str]
+                               metadata=None,  # type: Optional[Dict[str, str]]
+                               **kwargs):
+        # type: (...) -> DataLakeDirectoryClient
+        """
+        Create directory
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword lease:
+            Required if the directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+ For example, if p is 0777 and u is 0057, then the resulting permission is 0720. + The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027. + The umask must be specified in 4-digit octal notation (e.g. 0766). + :keyword str permissions: + Optional and only valid if Hierarchical Namespace + is enabled for the account. Sets POSIX access permissions for the file + owner, the file owning group, and others. Each class may be granted + read, write, or execute permission. The sticky bit is also supported. + Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are + supported. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: DataLakeDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/datalake_samples_file_system_async.py + :start-after: [START create_directory_from_file_system] + :end-before: [END create_directory_from_file_system] + :language: python + :dedent: 12 + :caption: Create directory in the file system. + """ + directory_client = self.get_directory_client(directory) + await directory_client.create_directory(metadata=metadata, **kwargs) + return directory_client + + @distributed_trace_async + async def delete_directory(self, directory, # type: Union[DirectoryProperties, str] + **kwargs): + # type: (...) -> DataLakeDirectoryClient + """ + Marks the specified path for deletion. + + :param directory: + The directory with which to interact. This can either be the name of the directory, + or an instance of DirectoryProperties. + :type directory: str or ~azure.storage.filedatalake.DirectoryProperties + :keyword lease: + Required if the file has an active lease. Value can be a LeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START delete_directory_from_file_system]
+                :end-before: [END delete_directory_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Delete directory in the file system.
+        """
+        directory_client = self.get_directory_client(directory)
+        await directory_client.delete_directory(**kwargs)
+        return directory_client
+
+    @distributed_trace_async
+    async def create_file(self, file,  # type: Union[FileProperties, str]
+                          **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Create file
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword metadata:
+            Name-value pairs associated with the file as metadata.
+        :paramtype metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword str permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START create_file_from_file_system]
+                :end-before: [END create_file_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Create file in the file system.
+        """
+        file_client = self.get_file_client(file)
+        await file_client.create_file(**kwargs)
+        return file_client
+
+    @distributed_trace_async
+    async def delete_file(self, file,  # type: Union[FileProperties, str]
+                          **kwargs):
+        # type: (...) -> DataLakeFileClient
+        """
+        Marks the specified file for deletion.
+
+        :param file:
+            The file with which to interact. This can either be the name of the file,
+            or an instance of FileProperties.
+        :type file: str or ~azure.storage.filedatalake.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START delete_file_from_file_system]
+                :end-before: [END delete_file_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Delete file in the file system.
+        """
+        file_client = self.get_file_client(file)
+        await file_client.delete_file(**kwargs)
+        return file_client
+
+    @distributed_trace_async
+    async def _undelete_path(self, deleted_path_name, deletion_id, **kwargs):
+        # type: (str, str, **Any) -> Union[DataLakeDirectoryClient, DataLakeFileClient]
+        """Restores a soft-deleted path.
+
+        The operation is successful only if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
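+
+        A usage sketch for this internal helper (names are illustrative;
+        deletion ids are obtained from ``list_deleted_paths``)::
+
+            async for deleted in file_system_client.list_deleted_paths():
+                # Returns a directory or file client for the restored path.
+                restored = await file_system_client._undelete_path(
+                    deleted.name, deleted.deletion_id)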
+
+        :param str deleted_path_name:
+            Specifies the name of the deleted path to restore.
+        :param str deletion_id:
+            Specifies the version of the deleted path to restore.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+            or ~azure.storage.filedatalake.aio.DataLakeFileClient
+        """
+        _, url, undelete_source = self._undelete_path_options(deleted_path_name, deletion_id)
+
+        pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable=protected-access
+        )
+        path_client = AzureDataLakeStorageRESTAPI(
+            url, file_system=self.file_system_name, path=deleted_path_name, pipeline=pipeline)
+        try:
+            is_file = await path_client.path.undelete(undelete_source=undelete_source, cls=is_file_path, **kwargs)
+            if is_file:
+                return self.get_file_client(deleted_path_name)
+            return self.get_directory_client(deleted_path_name)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_root_directory_client(self):
+        # type: () -> DataLakeDirectoryClient
+        """Get a client to interact with the root directory.
+
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+        """
+        return self.get_directory_client('/')
+
+    def get_directory_client(self, directory  # type: Union[DirectoryProperties, str]
+                             ):
+        # type: (...) -> DataLakeDirectoryClient
+        """Get a client to interact with the specified directory.
+
+        The directory need not already exist.
+
+        :param directory:
+            The directory with which to interact. This can either be the name of the directory,
+            or an instance of DirectoryProperties.
+        :type directory: str or ~azure.storage.filedatalake.DirectoryProperties
+        :returns: A DataLakeDirectoryClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_directory_client_from_file_system]
+                :end-before: [END get_directory_client_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Getting the directory client to interact with a specific directory.
+        """
+        try:
+            directory_name = directory.get('name')
+        except AttributeError:
+            directory_name = str(directory)
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable=protected-access
+        )
+        return DataLakeDirectoryClient(self.url, self.file_system_name, directory_name=directory_name,
+                                       credential=self._raw_credential,
+                                       api_version=self.api_version,
+                                       _configuration=self._config, _pipeline=_pipeline,
+                                       _hosts=self._hosts,
+                                       require_encryption=self.require_encryption,
+                                       key_encryption_key=self.key_encryption_key,
+                                       key_resolver_function=self.key_resolver_function,
+                                       loop=self._loop
+                                       )
+
+    def get_file_client(self, file_path  # type: Union[FileProperties, str]
+                        ):
+        # type: (...) -> DataLakeFileClient
+        """Get a client to interact with the specified file.
+
+        The file need not already exist.
+
+        :param file_path:
+            The file with which to interact. This can either be the path of the file from the
+            root directory, e.g. 'directory/subdirectory/file', or an instance of FileProperties.
+        :type file_path: str or ~azure.storage.filedatalake.FileProperties
+        :returns: A DataLakeFileClient.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeFileClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/datalake_samples_file_system_async.py
+                :start-after: [START get_file_client_from_file_system]
+                :end-before: [END get_file_client_from_file_system]
+                :language: python
+                :dedent: 12
+                :caption: Getting the file client to interact with a specific file.
+        """
+        try:
+            file_path = file_path.get('name')
+        except AttributeError:
+            file_path = str(file_path)
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable=protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable=protected-access
+        )
+        return DataLakeFileClient(
+            self.url, self.file_system_name, file_path=file_path, credential=self._raw_credential,
+            api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+            require_encryption=self.require_encryption,
+            key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function, loop=self._loop)
+
+    @distributed_trace
+    def list_deleted_paths(self, **kwargs):
+        # type: (Any) -> AsyncItemPaged[DeletedPathProperties]
+        """Returns a generator to list the deleted (file or directory) paths under the specified file system.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2020-06-12'.
+
+        :keyword str path_prefix:
+            Filters the results to return only paths under the specified path.
+        :keyword int results_per_page:
+            An optional value that specifies the maximum number of items to return per page.
+            If omitted or greater than 5,000, the response will include up to 5,000 items per page.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An iterable (auto-paging) response of DeletedPathProperties.
+        :rtype:
+            ~azure.core.async_paging.AsyncItemPaged[~azure.storage.filedatalake.DeletedPathProperties]
+        """
+        path_prefix = kwargs.pop('path_prefix', None)
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._datalake_client_for_blob_operation.file_system.list_blob_hierarchy_segment,
+            showonly=ListBlobsIncludeItem.deleted,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, prefix=path_prefix, page_iterator_class=DeletedPathPropertiesPaged,
+            results_per_page=results_per_page, **kwargs)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_list_paths_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_list_paths_helper.py
new file mode 100644
index 0000000..03831a5
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_list_paths_helper.py
@@ -0,0 +1,111 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+from azure.core.exceptions import HttpResponseError
+from azure.core.async_paging import AsyncPageIterator
+
+from .._deserialize import process_storage_error, get_deleted_path_properties_from_generated_code
+from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix
+
+from .._shared.models import DictMixin
+from .._shared.response_handlers import return_context_and_deserialized
+
+
+class DeletedPathPropertiesPaged(AsyncPageIterator):
+    """An iterable of deleted path properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A path name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.filedatalake.DeletedPathProperties)
+    :ivar str container: The container that the paths are listed from.
+    :ivar str delimiter: A delimiting character used for hierarchy listing.
+
+    :param callable command: Function to retrieve the next page of items.
+    """
+    def __init__(
+            self, command,
+            container=None,
+            prefix=None,
+            results_per_page=None,
+            continuation_token=None,
+            delimiter=None,
+            location_mode=None):
+        super(DeletedPathPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.container = container
+        self.delimiter = delimiter
+        self.current_page = None
+        self.location_mode = location_mode
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                prefix=self.prefix,
+                marker=continuation_token or None,
+                max_results=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.container = self._response.container_name
+        self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+        self.current_page = [self._build_item(item) for item in self.current_page]
+        self.delimiter = self._response.delimiter
+
+        return self._response.next_marker or None, self.current_page
+
+    def _build_item(self, item):
+        if isinstance(item, BlobItemInternal):
+            file_props = get_deleted_path_properties_from_generated_code(item)
+            file_props.file_system = self.container
+            return file_props
+        if isinstance(item, GenBlobPrefix):
+            return DirectoryPrefix(
+                container=self.container,
+                prefix=item.name,
+                results_per_page=self.results_per_page,
+                location_mode=self.location_mode)
+        return item
+
+
+class DirectoryPrefix(DictMixin):
+    """Directory prefix.
+
+    :ivar str name: Name of the deleted directory.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar str file_system: The file system that the deleted paths are listed from.
+    :ivar str delimiter: A delimiting character used for hierarchy listing.
+    """
+    def __init__(self, **kwargs):
+        self.name = kwargs.get('prefix')
+        self.results_per_page = kwargs.get('results_per_page')
+        self.file_system = kwargs.get('container')
+        self.delimiter = kwargs.get('delimiter')
+        self.location_mode = kwargs.get('location_mode')
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_models.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_models.py
new file mode 100644
index 0000000..16fe23c
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_models.py
@@ -0,0 +1,41 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+from azure.multiapi.storagev2.blob.v2020_06_12.aio._models import ContainerPropertiesPaged
+from .._models import FileSystemProperties
+
+
+class FileSystemPropertiesPaged(ContainerPropertiesPaged):
+    """An iterable of File System properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A file system name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(~azure.storage.filedatalake.FileSystemProperties)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only file systems whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of file system names to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(FileSystemPropertiesPaged, self).__init__(
+            *args,
+            **kwargs
+        )
+
+    @staticmethod
+    def _build_item(item):
+        return FileSystemProperties._from_generated(item)  # pylint: disable=protected-access
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_path_client_async.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_path_client_async.py
new file mode 100644
index 0000000..8a5ad46
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_path_client_async.py
@@ -0,0 +1,732 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+from datetime import datetime
+from typing import Any, Dict, Union
+
+from azure.core.exceptions import AzureError, HttpResponseError
+from azure.multiapi.storagev2.blob.v2020_06_12.aio import BlobClient
+from .._serialize import get_api_version
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin
+from .._path_client import PathClient as PathClientBase
+from .._models import DirectoryProperties, AccessControlChangeResult, AccessControlChangeFailure, \
+    AccessControlChangeCounters, AccessControlChanges
+from .._generated.aio import AzureDataLakeStorageRESTAPI
+from ._data_lake_lease_async import DataLakeLeaseClient
+from .._deserialize import process_storage_error
+from .._shared.policies_async import ExponentialRetry
+
+_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = (
+    'The require_encryption flag is set, but encryption is not supported'
+    ' for this method.')
+
+
+class PathClient(AsyncStorageAccountHostsMixin, PathClientBase):
+    def __init__(
+            self, account_url,  # type: str
+            file_system_name,  # type: str
+            path_name,  # type: str
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+
+        super(PathClient, self).__init__(account_url,  # pylint: disable=specify-parameter-names-in-call
+                                         file_system_name, path_name,
+                                         credential=credential,
+                                         **kwargs)  # type: ignore
+
+        kwargs.pop('_hosts', None)
+
+        self._blob_client = BlobClient(account_url=self._blob_account_url, container_name=file_system_name,
+                                       blob_name=path_name,
+                                       credential=credential,
+                                       _hosts=self._blob_client._hosts,  # pylint: disable=protected-access
+                                       **kwargs)
+
+        self._client = AzureDataLakeStorageRESTAPI(self.url, file_system=file_system_name, path=path_name,
+                                                   pipeline=self._pipeline)
+        self._datalake_client_for_blob_operation = AzureDataLakeStorageRESTAPI(self._blob_client.url,
+                                                                               file_system=file_system_name,
+                                                                               path=path_name,
+                                                                               pipeline=self._pipeline)
+        api_version = get_api_version(kwargs)
+        self._client._config.version = api_version  # pylint: disable=protected-access
+        self._datalake_client_for_blob_operation._config.version = api_version  # pylint: disable=protected-access
+
+        self._loop = kwargs.get('loop', None)
+
+    async def __aexit__(self, *args):
+        await self._blob_client.close()
+        await super(PathClient, self).__aexit__(*args)
+
+    async def close(self):
+        # type: () -> None
+        """Close the sockets opened by the client.
+        This method does not need to be called when using the client as a context manager.
+        """
+        await self._blob_client.close()
+        await self.__aexit__()
+
+    async def _create(self, resource_type, content_settings=None, metadata=None, **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Create directory or file
+
+        :param resource_type:
+            Required for Create File and Create Directory.
+            The value must be "file" or "directory". Possible values include:
+            'directory', 'file'
+        :type resource_type: str
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :param metadata:
+            Name-value pairs associated with the file/directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword str umask:
+            Optional and only valid if Hierarchical Namespace is enabled for the account.
+            When creating a file or directory and the parent folder does not have a default ACL,
+            the umask restricts the permissions of the file or directory to be created.
+            The resulting permission is given by p & ^u, where p is the permission and u is the umask.
+            For example, if p is 0777 and u is 0057, then the resulting permission is 0720.
+            The default permission is 0777 for a directory and 0666 for a file. The default umask is 0027.
+            The umask must be specified in 4-digit octal notation (e.g. 0766).
+        :keyword permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+        :type permissions: str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: Dict[str, Union[str, datetime]]
+        """
+        options = self._create_path_options(
+            resource_type,
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return await self._client.path.create(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _delete(self, **kwargs):
+        # type: (**Any) -> None
+        """
+        Marks the specified path for deletion.
+
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: None
+        """
+        options = self._delete_path_options(**kwargs)
+        try:
+            return await self._client.path.delete(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def set_access_control(self, owner=None,  # type: Optional[str]
+                                 group=None,  # type: Optional[str]
+                                 permissions=None,  # type: Optional[str]
+                                 acl=None,  # type: Optional[str]
+                                 **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """
+        Set the owner, group, permissions, or access control list for a path.
+
+        :param owner:
+            Optional. The owner of the file or directory.
+        :type owner: str
+        :param group:
+            Optional. The owning group of the file or directory.
+        :type group: str
+        :param permissions:
+            Optional and only valid if Hierarchical Namespace
+            is enabled for the account. Sets POSIX access permissions for the file
+            owner, the file owning group, and others. Each class may be granted
+            read, write, or execute permission. The sticky bit is also supported.
+            Both symbolic (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are
+            supported.
+            permissions and acl are mutually exclusive.
+        :type permissions: str
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+            permissions and acl are mutually exclusive.
+        :type acl: str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: response dict (Etag and last modified).
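+
+        A short sketch (``path_client`` is an assumed instance of this class;
+        the ACL value is illustrative and follows the ACE format described
+        above)::
+
+            await path_client.set_access_control(
+                acl="user::rwx,group::r-x,other::r--")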
+        """
+        options = self._set_access_control_options(owner=owner, group=group, permissions=permissions, acl=acl, **kwargs)
+        try:
+            return await self._client.path.set_access_control(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def get_access_control(self, upn=None,  # type: Optional[bool]
+                                 **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """
+        Get the owner, group, permissions, or access control list for a path.
+
+        :param upn:
+            Optional. Valid only when Hierarchical Namespace is
+            enabled for the account. If "true", the user identity values returned
+            in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be
+            transformed from Azure Active Directory Object IDs to User Principal
+            Names. If "false", the values will be returned as Azure Active
+            Directory Object IDs. The default value is false. Note that group and
+            application Object IDs are not translated because they do not have
+            unique friendly names.
+        :type upn: bool
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: response dict.
+        """
+        options = self._get_access_control_options(upn=upn, **kwargs)
+        try:
+            return await self._client.path.get_properties(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def set_access_control_recursive(self,
+                                           acl,
+                                           **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Sets the Access Control on a path and sub-paths.
+
+        :param acl:
+            Sets POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume a previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. Batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation
+            continues until the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned when continue_on_failure is True in case of user errors.
+            If not set, the default value is False.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            User can restart the operation using continuation_token field of AzureError if the token is available.
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='set', acl=acl, **kwargs)
+        return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                       max_batches=max_batches)
+
+    async def update_access_control_recursive(self, acl, **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Modifies the Access Control on a path and sub-paths.
+
+        :param acl:
+            Modifies POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, a user or
+            group identifier, and permissions in the format
+            "[scope:][type]:[id]:[permissions]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume a previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. Batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation
+            continues until the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned when continue_on_failure is True in case of user errors.
+            If not set, the default value is False.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            User can restart the operation using continuation_token field of AzureError if the token is available.
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='modify', acl=acl, **kwargs)
+        return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                       max_batches=max_batches)
+
+    async def remove_access_control_recursive(self,
+                                              acl,
+                                              **kwargs):
+        # type: (str, **Any) -> AccessControlChangeResult
+        """
+        Removes the Access Control on a path and sub-paths.
+
+        :param acl:
+            Removes POSIX access control rights on files and directories.
+            The value is a comma-separated list of access control entries. Each
+            access control entry (ACE) consists of a scope, a type, and a user or
+            group identifier in the format "[scope:][type]:[id]".
+        :type acl: str
+        :keyword func(~azure.storage.filedatalake.AccessControlChanges) progress_hook:
+            Callback where the caller can track progress of the operation
+            as well as collect paths that failed to change Access Control.
+        :keyword str continuation_token:
+            Optional continuation token that can be used to resume a previously stopped operation.
+        :keyword int batch_size:
+            Optional. If the data set size exceeds the batch size, the operation will be split into multiple
+            requests so that progress can be tracked. Batch size should be between 1 and 2000.
+            The default when unspecified is 2000.
+        :keyword int max_batches:
+            Optional. Defines the maximum number of batches that a single change Access Control operation can execute.
+            If the maximum is reached before all sub-paths are processed,
+            the continuation token can be used to resume the operation.
+            An empty value indicates that the maximum number of batches is unbounded and the operation
+            continues until the end.
+        :keyword bool continue_on_failure:
+            If set to False, the operation will terminate quickly on encountering user errors (4XX).
+            If True, the operation will ignore user errors and proceed with the operation on other sub-entities of
+            the directory.
+            A continuation token will only be returned when continue_on_failure is True in case of user errors.
+            If not set, the default value is False.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :return: A summary of the recursive operations, including the count of successes and failures,
+            as well as a continuation token in case the operation was terminated prematurely.
+        :rtype: ~azure.storage.filedatalake.AccessControlChangeResult
+        :raises ~azure.core.exceptions.AzureError:
+            User can restart the operation using continuation_token field of AzureError if the token is available.
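+
+        A resumable-call sketch (``path_client``, the ACL value, and the
+        batch size are illustrative; ``continuation_token`` is only
+        populated when the operation stops early)::
+
+            async def report(changes):
+                # AccessControlChanges carries per-batch and aggregate counters.
+                print("failures so far:", changes.aggregate_counters.failure_count)
+
+            try:
+                result = await path_client.remove_access_control_recursive(
+                    acl="default:user:bob", batch_size=500, progress_hook=report)
+            except AzureError as error:
+                # Resume later by passing continuation_token=error.continuation_token.
+                token = error.continuation_token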
+        """
+        if not acl:
+            raise ValueError("The Access Control List must be set for this operation")
+
+        progress_hook = kwargs.pop('progress_hook', None)
+        max_batches = kwargs.pop('max_batches', None)
+        options = self._set_access_control_recursive_options(mode='remove', acl=acl, **kwargs)
+        return await self._set_access_control_internal(options=options, progress_hook=progress_hook,
+                                                       max_batches=max_batches)
+
+    async def _set_access_control_internal(self, options, progress_hook, max_batches=None):
+        try:
+            continue_on_failure = options.get('force_flag')
+            total_directories_successful = 0
+            total_files_success = 0
+            total_failure_count = 0
+            batch_count = 0
+            last_continuation_token = None
+            current_continuation_token = None
+            continue_operation = True
+            while continue_operation:
+                headers, resp = await self._client.path.set_access_control_recursive(**options)
+
+                # make a running tally so that we can report the final results
+                total_directories_successful += resp.directories_successful
+                total_files_success += resp.files_successful
+                total_failure_count += resp.failure_count
+                batch_count += 1
+                current_continuation_token = headers['continuation']
+
+                if current_continuation_token is not None:
+                    last_continuation_token = current_continuation_token
+
+                if progress_hook is not None:
+                    await progress_hook(AccessControlChanges(
+                        batch_counters=AccessControlChangeCounters(
+                            directories_successful=resp.directories_successful,
+                            files_successful=resp.files_successful,
+                            failure_count=resp.failure_count,
+                        ),
+                        aggregate_counters=AccessControlChangeCounters(
+                            directories_successful=total_directories_successful,
+                            files_successful=total_files_success,
+                            failure_count=total_failure_count,
+                        ),
+                        batch_failures=[AccessControlChangeFailure(
+                            name=failure.name,
+                            is_directory=failure.type == 'DIRECTORY',
+                            error_message=failure.error_message) for failure in resp.failed_entries],
+                        continuation=last_continuation_token))
+
+                # update the continuation token, if there are more operations that cannot be completed in a single call
+                max_batches_satisfied = (max_batches is not None and batch_count == max_batches)
+                continue_operation = bool(current_continuation_token) and not max_batches_satisfied
+                options['continuation'] = current_continuation_token
+
+            # currently the service stops on any failure, so we should send back the last continuation token
+            # for the user to retry the failed updates
+            # otherwise we should just return what the service gave us
+            return AccessControlChangeResult(counters=AccessControlChangeCounters(
+                directories_successful=total_directories_successful,
+                files_successful=total_files_success,
+                failure_count=total_failure_count),
+                continuation=last_continuation_token
+                if total_failure_count > 0 and not continue_on_failure else current_continuation_token)
+        except HttpResponseError as error:
+            error.continuation_token = last_continuation_token
+            process_storage_error(error)
+        except AzureError as error:
+            error.continuation_token = last_continuation_token
+            raise error
+
+    async def _rename_path(self, rename_source, **kwargs):
+        # type: (str, **Any) -> Dict[str, Any]
+        """
+        Renames a directory or file.
+
+        :param rename_source: The value must have the following format: "/{filesystem}/{path}".
+        :type rename_source: str
+        :keyword ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set path properties.
+        :keyword source_lease: A lease ID for the source path.
If specified,
+            the source path must have an active lease and the lease ID must
+            match.
+        :paramtype source_lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword lease:
+            Required if the file/directory has an active lease. Value can be a LeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        """
+        options = self._rename_path_options(
+            rename_source,
+            **kwargs)
+        try:
+            return await self._client.path.create(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _get_path_properties(self, **kwargs):
+        # type: (**Any) -> Union[FileProperties, DirectoryProperties]
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the file or directory. It does not return the content of the directory or file.
+
+        :keyword lease:
+            Required if the directory or file has an active lease. Value can be a DataLakeLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :rtype: DirectoryProperties or FileProperties
+        """
+        path_properties = await self._blob_client.get_blob_properties(**kwargs)
+        return path_properties
+
+    async def _exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a path exists, False otherwise.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: True if the path exists, False otherwise.
+        """
+        return await self._blob_client.exists(**kwargs)
+
+    async def set_metadata(self, metadata,  # type: Dict[str, str]
+                           **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime]]
+        """Sets one or more user-defined name-value pairs for the specified
+        file or directory. Each call to this operation replaces all existing metadata
+        attached to the path. To remove all metadata from the path,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the path as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_metadata only succeeds if the
+            path's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: path-updated property dict (Etag and last modified).
+        """
+        return await self._blob_client.set_blob_metadata(metadata=metadata, **kwargs)
+
+    async def set_http_headers(self, content_settings=None,  # type: Optional[ContentSettings]
+                               **kwargs):
+        # type: (...) -> Dict[str, Any]
+        """Sets system properties on the file or directory.
+
+        If one property is set for the content_settings, all properties will be overridden.
+
+        :param ~azure.storage.filedatalake.ContentSettings content_settings:
+            ContentSettings object used to set file/directory properties.
+        :keyword lease:
+            If specified, set_http_headers only succeeds if the
+            file/directory's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.filedatalake.aio.DataLakeLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: file/directory-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
+        """
+        return await self._blob_client.set_http_headers(content_settings=content_settings, **kwargs)
+
+    async def acquire_lease(self, lease_duration=-1,  # type: Optional[int]
+                            lease_id=None,  # type: Optional[str]
+                            **kwargs):
+        # type: (...) -> DataLakeLeaseClient
+        """
+        Requests a new lease. If the file or directory does not have an active lease,
+        the DataLake service creates a lease on the file/directory and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The DataLake service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: A DataLakeLeaseClient object that can be run in a context manager.
+        :rtype: ~azure.storage.filedatalake.aio.DataLakeLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/test_file_system_samples.py
+                :start-after: [START acquire_lease_on_file_system]
+                :end-before: [END acquire_lease_on_file_system]
+                :language: python
+                :dedent: 8
+                :caption: Acquiring a lease on the file system.
+        """
+        lease = DataLakeLeaseClient(self, lease_id=lease_id)  # type: ignore
+        await lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_upload_helper.py b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_upload_helper.py
new file mode 100644
index 0000000..00d5bf1
--- /dev/null
+++ b/azure/multiapi/storagev2/filedatalake/v2020_06_12/aio/_upload_helper.py
@@ -0,0 +1,103 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+from azure.core.exceptions import HttpResponseError
+from .._deserialize import (
+    process_storage_error)
+from .._shared.response_handlers import return_response_headers
+from .._shared.uploads_async import (
+    upload_data_chunks,
+    DataLakeFileChunkUploader, upload_substream_blocks)
+
+
+def _any_conditions(modified_access_conditions=None, **kwargs):  # pylint: disable=unused-argument
+    return any([
+        modified_access_conditions.if_modified_since,
+        modified_access_conditions.if_unmodified_since,
+        modified_access_conditions.if_none_match,
+        modified_access_conditions.if_match
+    ])
+
+
+async def upload_datalake_file(  # pylint: disable=unused-argument
+        client=None,
+        stream=None,
+        length=None,
+        overwrite=None,
+        validate_content=None,
+        max_concurrency=None,
+        file_settings=None,
+        **kwargs):
+    try:
+        if length == 0:
+            return {}
+        properties = kwargs.pop('properties', None)
+        umask = kwargs.pop('umask', None)
+        permissions = kwargs.pop('permissions', None)
+        path_http_headers = kwargs.pop('path_http_headers', None)
+        modified_access_conditions = kwargs.pop('modified_access_conditions', None)
+        chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024)
+
+        if not overwrite:
+            # if the customer didn't specify access conditions, they cannot flush data to an existing file
+            if not _any_conditions(modified_access_conditions):
+                modified_access_conditions.if_none_match = '*'
+            if properties or umask or permissions:
+                raise ValueError("metadata, umask and permissions can be set only when overwrite is enabled")
+
+        if overwrite:
+            response = await client.create(
+                resource='file',
+                path_http_headers=path_http_headers,
+                properties=properties,
+                modified_access_conditions=modified_access_conditions,
+                umask=umask,
+                permissions=permissions,
+                cls=return_response_headers,
+                **kwargs)
+
+            # this modified_access_conditions will be applied to flush_data to make sure
+            # that no other flush happens between the create call and the current flush
+            modified_access_conditions.if_match = response['etag']
+            modified_access_conditions.if_none_match = None
+            modified_access_conditions.if_modified_since = None
+            modified_access_conditions.if_unmodified_since = None
+
+        use_original_upload_path = file_settings.use_byte_buffer or \
+            validate_content or chunk_size < file_settings.min_large_chunk_upload_threshold or \
+            hasattr(stream, 'seekable') and not stream.seekable() or \
+            not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
+
+        if use_original_upload_path:
+            await upload_data_chunks(
+                service=client,
+                uploader_class=DataLakeFileChunkUploader,
+                total_size=length,
+                chunk_size=chunk_size,
+                stream=stream,
+                max_concurrency=max_concurrency,
+                validate_content=validate_content,
+                **kwargs)
+        else:
+            await upload_substream_blocks(
+                service=client,
+                uploader_class=DataLakeFileChunkUploader,
+                total_size=length,
+                chunk_size=chunk_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                **kwargs
+            )
+
+        return await client.flush_data(position=length,
+                                       path_http_headers=path_http_headers,
+                                       modified_access_conditions=modified_access_conditions,
+                                       close=True,
+                                       cls=return_response_headers,
+                                       **kwargs)
+    except HttpResponseError as error:
+        process_storage_error(error)
diff --git a/azure/multiapi/storagev2/filedatalake/v2020_06_12/py.typed b/azure/multiapi/storagev2/filedatalake/v2020_06_12/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_directory_client.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_directory_client.py
index 9d0b55a..557c94f 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_directory_client.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_directory_client.py
@@ -46,6 +46,10 @@ class ShareDirectoryClient(StorageAccountHostsMixin):
     For operations relating to a specific subdirectory or file in this share, the clients for those
     entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions.
 
+    For more optional configuration, please click
+    `here `_.
+
     :param str account_url:
         The URI to the storage account. In order to create a client given the full URI to the
         directory, use the :func:`from_directory_url` classmethod.
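The recursive access-control methods documented above (set, update, remove) share one batching and continuation-token contract: progress arrives through ``progress_hook``, and a prematurely terminated operation surfaces a token either on the result or on the raised ``AzureError``. Below is a minimal sketch of driving that contract; the client object, ACL string, and callback body are illustrative assumptions, not part of this changeset::

    from azure.core.exceptions import AzureError

    async def set_acl_with_resume(directory_client, acl):
        # directory_client is assumed to be an already-authenticated
        # azure.storage.filedatalake.aio DataLakeDirectoryClient.

        async def progress(changes):
            # AccessControlChanges carries per-batch and aggregate counters,
            # plus any per-entry failures and the latest continuation token.
            print("files updated so far:", changes.aggregate_counters.files_successful)

        try:
            return await directory_client.set_access_control_recursive(
                acl=acl, progress_hook=progress, batch_size=2000)
        except AzureError as error:
            # When available, the token lets the caller resume where the
            # operation stopped instead of restarting from the root.
            token = getattr(error, 'continuation_token', None)
            if token is None:
                raise
            return await directory_client.set_access_control_recursive(
                acl=acl, continuation_token=token)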
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_download.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_download.py index 8a86027..8f47bee 100644 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_download.py +++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_download.py @@ -8,6 +8,7 @@ import threading import warnings from io import BytesIO +from typing import Iterator from azure.core.exceptions import HttpResponseError from azure.core.tracing.common import with_current_context @@ -171,8 +172,9 @@ def _download_chunk(self, chunk_start, chunk_end): class _ChunkIterator(object): """Async iterator for chunks in blob download stream.""" - def __init__(self, size, content, downloader): + def __init__(self, size, content, downloader, chunk_size): self.size = size + self._chunk_size = chunk_size self._current_content = content self._iter_downloader = downloader self._iter_chunks = None @@ -189,21 +191,37 @@ def __next__(self): if self._complete: raise StopIteration("Download complete") if not self._iter_downloader: - # If no iterator was supplied, the download completed with - # the initial GET, so we just return that data + # cut the data obtained from initial GET into chunks + if len(self._current_content) > self._chunk_size: + return self._get_chunk_data() self._complete = True return self._current_content if not self._iter_chunks: self._iter_chunks = self._iter_downloader.get_chunk_offsets() - else: + + # initial GET result still has more than _chunk_size bytes of data + if len(self._current_content) >= self._chunk_size: + return self._get_chunk_data() + + try: chunk = next(self._iter_chunks) - self._current_content = self._iter_downloader.yield_chunk(chunk) + self._current_content += self._iter_downloader.yield_chunk(chunk) + except StopIteration as e: + self._complete = True + if self._current_content: + return self._current_content + raise e - return self._current_content + return self._get_chunk_data() next = __next__ # Python 2 compatibility. + def _get_chunk_data(self): + chunk_data = self._current_content[: self._chunk_size] + self._current_content = self._current_content[self._chunk_size:] + return chunk_data + class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes """A streaming object to download from Azure Storage. @@ -371,6 +389,11 @@ def _initial_request(self): return response def chunks(self): + # type: () -> Iterator[bytes] + """Iterate over chunks in the download stream. + + :rtype: Iterator[bytes] + """ if self.size == 0 or self._download_complete: iter_downloader = None else: @@ -395,7 +418,8 @@ def chunks(self): return _ChunkIterator( size=self.size, content=self._current_content, - downloader=iter_downloader) + downloader=iter_downloader, + chunk_size=self._config.max_chunk_get_size) def readall(self): """Download the contents of this file. 
@@ -493,11 +517,11 @@ def readinto(self, stream):
         )
         if parallel:
             import concurrent.futures
-            executor = concurrent.futures.ThreadPoolExecutor(self._max_concurrency)
-            list(executor.map(
-                    with_current_context(downloader.process_chunk),
-                    downloader.get_chunk_offsets()
-                ))
+            with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor:
+                list(executor.map(
+                        with_current_context(downloader.process_chunk),
+                        downloader.get_chunk_offsets()
+                    ))
         else:
             for chunk in downloader.get_chunk_offsets():
                 downloader.process_chunk(chunk)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_file_client.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_file_client.py
index ff0938f..6ac6dc0 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_file_client.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_file_client.py
@@ -97,6 +97,10 @@ def _upload_file_helper(
 class ShareFileClient(StorageAccountHostsMixin):
     """A client to interact with a specific file, although that file may not yet exist.
 
+    For more optional configuration, please click
+    `here `_.
+
     :param str account_url:
         The URI to the storage account. In order to create a client given the full URI to the
         file, use the :func:`from_file_url` classmethod.
@@ -683,8 +687,10 @@ def download_file(
         length=None,  # type: Optional[int]
         **kwargs
     ):
-        # type: (...) -> Iterable[bytes]
-        """Downloads a file to a stream with automatic chunking.
+        # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+        """Downloads a file to the StorageStreamDownloader. Use the readall() method to
+        read all the content, or readinto() to download the file into a stream.
+        The chunks() method returns an iterator which allows the user to iterate over the content in chunks.
 
         :param int offset:
             Start of byte range to use for downloading a section of the file.
@@ -712,7 +718,8 @@ def download_file(
         :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
         :keyword int timeout:
             The timeout parameter is expressed in seconds.
-        :returns: A iterable data generator (stream)
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.fileshare.StorageStreamDownloader
 
         .. admonition:: Example:
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_lease.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_lease.py
index 61b1d90..7c38145 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_lease.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_lease.py
@@ -7,7 +7,7 @@
 import uuid
 
 from typing import (  # pylint: disable=unused-import
-    Optional, Any, TypeVar, TYPE_CHECKING
+    Union, Optional, Any, TypeVar, TYPE_CHECKING
 )
 
 from azure.core.tracing.decorator import distributed_trace
@@ -25,7 +25,7 @@
 class ShareLeaseClient(object):
     """Creates a new ShareLeaseClient.
 
-    This client provides lease operations on a ShareFileClient.
+    This client provides lease operations on a ShareClient or ShareFileClient.
 
     :ivar str id:
         The ID of the lease currently being maintained. This will be `None` if no
@@ -38,8 +38,9 @@ class ShareLeaseClient(object):
         This will be `None` if no lease has yet been acquired or modified.
     :param client:
-        The client of the file to lease.
-    :type client: ~azure.storage.fileshare.ShareFileClient
+        The client of the file or share to lease.
+    :type client: ~azure.storage.fileshare.ShareFileClient or
+        ~azure.storage.fileshare.ShareClient
     :param str lease_id:
         A string representing the lease ID of an existing lease.
This value does not need to be specified in order to acquire a new lease, or break one. @@ -47,7 +48,7 @@ class ShareLeaseClient(object): def __init__( self, client, lease_id=None ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs - # type: (ShareFileClient, Optional[str]) -> None + # type: (Union[ShareFileClient, ShareClient], Optional[str]) -> None self.id = lease_id or str(uuid.uuid4()) self.last_modified = None self.etag = None @@ -70,14 +71,20 @@ def __exit__(self, *args): def acquire(self, **kwargs): # type: (**Any) -> None """Requests a new lease. This operation establishes and manages a lock on a - file for write and delete operations. If the file does not have an active lease, - the File service creates a lease on the file. If the file has an active lease, + file or share for write and delete operations. If the file or share does not have an active lease, + the File or Share service creates a lease on the file or share. If the file has an active lease, you can only request a new lease using the active lease ID. - If the file does not have an active lease, the File service creates a + If the file or share does not have an active lease, the File or Share service creates a lease on the file and returns a new lease ID. + :keyword int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be + between 15 and 60 seconds. A share lease duration cannot be changed + using renew or change. Default is -1 (infinite share lease). + :keyword int timeout: The timeout parameter is expressed in seconds. :rtype: None @@ -99,7 +106,7 @@ def acquire(self, **kwargs): self.etag = response.get('etag') # type: str @distributed_trace - def _renew(self, **kwargs): + def renew(self, **kwargs): # type: (Any) -> None """Renews the share lease. @@ -134,8 +141,8 @@ def _renew(self, **kwargs): def release(self, **kwargs): # type: (Any) -> None """Releases the lease. The lease may be released if the lease ID specified on the request matches - that associated with the file. Releasing the lease allows another client to immediately acquire - the lease for the or file as soon as the release is complete. + that associated with the share or file. Releasing the lease allows another client to immediately acquire + the lease for the share or file as soon as the release is complete. :keyword int timeout: The timeout parameter is expressed in seconds. @@ -162,7 +169,7 @@ def change(self, proposed_lease_id, **kwargs): a new lease ID in x-ms-proposed-lease-id. :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File service will raise an error + Proposed lease ID, in a GUID string format. The File or Share service will raise an error (Invalid request) if the proposed lease ID is not in the correct format. :keyword int timeout: The timeout parameter is expressed in seconds. @@ -186,7 +193,7 @@ def change(self, proposed_lease_id, **kwargs): @distributed_trace def break_lease(self, **kwargs): # type: (Any) -> int - """Force breaks the lease if the file has an active lease. Any authorized request can break the lease; + """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; the request is not required to specify a matching lease ID. An infinite lease breaks immediately. Once a lease is broken, it cannot be changed. 
        Any authorized request can break the lease;
@@ -194,6 +201,19 @@ def break_lease(self, **kwargs):
         When a lease is successfully broken, the response indicates the interval
         in seconds until a new lease can be acquired.
 
+        :keyword int lease_break_period:
+            This is the proposed duration, in seconds, that the share lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the share lease. If longer, the time remaining on the share lease is used.
+            A new share lease will not be available before the break period has
+            expired, but the share lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration share lease breaks after the remaining share lease
+            period elapses, and an infinite share lease breaks immediately.
+
+            .. versionadded:: 12.6.0
+
         :keyword int timeout:
             The timeout parameter is expressed in seconds.
         :return: Approximate time remaining in the lease period, in seconds.
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_models.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_models.py
index 796b43b..ef93743 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_models.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_models.py
@@ -274,7 +274,7 @@ class ContentSettings(DictMixin):
     :param str cache_control:
         If the cache_control has previously been set for
         the file, that value is stored.
-    :param str content_md5:
+    :param bytearray content_md5:
         If the content_md5 has been set for the file,
         this response header is stored so that the client can
         check for message content integrity.
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_client.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_client.py
index db0d1c9..2b6e5e6 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_client.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_client.py
@@ -31,7 +31,7 @@
     DeleteSnapshotsOptionType,
     SharePermission)
 from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission
-from ._serialize import get_api_version
+from ._serialize import get_api_version, get_access_conditions
 from ._directory_client import ShareDirectoryClient
 from ._file_client import ShareFileClient
 from ._lease import ShareLeaseClient
@@ -48,6 +48,10 @@ class ShareClient(StorageAccountHostsMixin):
     For operations relating to a specific directory or file in this share, the clients for those
     entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions.
 
+    For more optional configuration, please click
+    `here `_.
+
     :param str account_url:
         The URI to the storage account. In order to create a client given the full URI to the share,
         use the :func:`from_share_url` classmethod.
@@ -265,21 +269,21 @@ def get_file_client(self, file_path):
             _pipeline=_pipeline, _location_mode=self._location_mode)
 
     @distributed_trace
-    def _acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs):
-        # type: (int, Optional[str], **Any) -> ShareLeaseClient
+    def acquire_lease(self, **kwargs):
+        # type: (**Any) -> ShareLeaseClient
         """Requests a new lease. If the share does not have an active lease, the Share Service creates a
         lease on the share and returns a new lease.
 
-        .. 
versionadded:: 12.5.0 - :param int lease_duration: + :keyword int lease_duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. Default is -1 (infinite lease). - :param str lease_id: + :keyword str lease_id: Proposed lease ID, in a GUID string format. The Share Service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. @@ -297,7 +301,8 @@ def _acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): :dedent: 8 :caption: Acquiring a lease on a share. """ - kwargs['lease_duration'] = lease_duration + kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) + lease_id = kwargs.pop('lease_id', None) lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore lease.acquire(**kwargs) return lease @@ -317,7 +322,7 @@ def create_share(self, **kwargs): Possible values: 'TransactionOptimized', 'Hot', 'Cool' :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - .. versionadded:: 12.6.0 + .. versionadded:: 12.4.0 :keyword int timeout: The timeout parameter is expressed in seconds. @@ -423,6 +428,13 @@ def delete_share( :param bool delete_snapshots: Indicates if snapshots are to be deleted. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :keyword int timeout: The timeout parameter is expressed in seconds. @@ -435,6 +447,7 @@ def delete_share( :dedent: 12 :caption: Deletes the share and any snapshots. """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) delete_include = None if delete_snapshots: @@ -443,6 +456,7 @@ def delete_share( self._client.share.delete( timeout=timeout, sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, delete_snapshots=delete_include, **kwargs) except HttpResponseError as error: @@ -457,6 +471,13 @@ def get_share_properties(self, **kwargs): :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :returns: The share properties. :rtype: ~azure.storage.fileshare.ShareProperties @@ -469,12 +490,14 @@ def get_share_properties(self, **kwargs): :dedent: 12 :caption: Gets the share properties. """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) try: props = self._client.share.get_properties( timeout=timeout, sharesnapshot=self.snapshot, cls=deserialize_share_properties, + lease_access_conditions=access_conditions, **kwargs) except HttpResponseError as error: process_storage_error(error) @@ -492,6 +515,13 @@ def set_share_quota(self, quota, **kwargs): Must be greater than 0, and less than or equal to 5TB. :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :returns: Share-updated property dict (Etag and last modified). 
:rtype: dict(str, Any) @@ -504,12 +534,14 @@ def set_share_quota(self, quota, **kwargs): :dedent: 12 :caption: Sets the share quota. """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) try: return self._client.share.set_properties( # type: ignore timeout=timeout, quota=quota, access_tier=None, + lease_access_conditions=access_conditions, cls=return_response_headers, **kwargs) except HttpResponseError as error: @@ -520,7 +552,7 @@ def set_share_properties(self, **kwargs): # type: (Any) -> Dict[str, Any] """Sets the share properties. - .. versionadded:: 12.6.0 + .. versionadded:: 12.4.0 :keyword access_tier: Specifies the access tier of the share. @@ -535,6 +567,9 @@ def set_share_properties(self, **kwargs): Root squash to set on the share. Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. :returns: Share-updated property dict (Etag and last modified). :rtype: dict(str, Any) @@ -547,6 +582,7 @@ def set_share_properties(self, **kwargs): :dedent: 12 :caption: Sets the share properties. """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) access_tier = kwargs.pop('access_tier', None) quota = kwargs.pop('quota', None) @@ -559,6 +595,7 @@ def set_share_properties(self, **kwargs): quota=quota, access_tier=access_tier, root_squash=root_squash, + lease_access_conditions=access_conditions, cls=return_response_headers, **kwargs) except HttpResponseError as error: @@ -578,6 +615,13 @@ def set_share_metadata(self, metadata, **kwargs): :type metadata: dict(str, str) :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :returns: Share-updated property dict (Etag and last modified). :rtype: dict(str, Any) @@ -590,6 +634,7 @@ def set_share_metadata(self, metadata, **kwargs): :dedent: 12 :caption: Sets the share metadata. """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) headers = kwargs.pop('headers', {}) headers.update(add_metadata_headers(metadata)) @@ -598,6 +643,7 @@ def set_share_metadata(self, metadata, **kwargs): timeout=timeout, cls=return_response_headers, headers=headers, + lease_access_conditions=access_conditions, **kwargs) except HttpResponseError as error: process_storage_error(error) @@ -610,14 +656,23 @@ def get_share_access_policy(self, **kwargs): :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :returns: Access policy information in a dict. 
:rtype: dict[str, Any] """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) try: response, identifiers = self._client.share.get_access_policy( timeout=timeout, cls=return_headers_and_deserialized, + lease_access_conditions=access_conditions, **kwargs) except HttpResponseError as error: process_storage_error(error) @@ -640,9 +695,17 @@ def set_share_access_policy(self, signed_identifiers, **kwargs): :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :returns: Share-updated property dict (Etag and last modified). :rtype: dict(str, Any) """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) if len(signed_identifiers) > 5: raise ValueError( @@ -660,6 +723,7 @@ def set_share_access_policy(self, signed_identifiers, **kwargs): share_acl=signed_identifiers or None, timeout=timeout, cls=return_response_headers, + lease_access_conditions=access_conditions, **kwargs) except HttpResponseError as error: process_storage_error(error) @@ -674,13 +738,22 @@ def get_share_stats(self, **kwargs): :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :return: The approximate size of the data (in bytes) stored on the share. :rtype: int """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) try: stats = self._client.share.get_statistics( timeout=timeout, + lease_access_conditions=access_conditions, **kwargs) return stats.share_usage_bytes # type: ignore except HttpResponseError as error: diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_service_client.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_service_client.py index 74c28d8..600426b 100644 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_service_client.py +++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_share_service_client.py @@ -49,6 +49,10 @@ class ShareServiceClient(StorageAccountHostsMixin): For operations relating to a specific share, a client for that entity can also be retrieved using the :func:`get_share_client` function. + For more optional configuration, please click + `here `_. + :param str account_url: The URL to the file share storage account. Any other entities included in the URL path (e.g. share or file) will be discarded. This URL can be optionally diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/base_client.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/base_client.py index 7b0258f..5e524b2 100644 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/base_client.py +++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/base_client.py @@ -3,19 +3,13 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
# -------------------------------------------------------------------------- - +import logging +import uuid from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - Iterable, - Dict, - List, - Type, Tuple, - TYPE_CHECKING, ) -import logging try: from urllib.parse import parse_qs, quote @@ -45,6 +39,7 @@ from .models import LocationMode from .authentication import SharedKeyCredentialPolicy from .shared_access_signature import QueryStringConstants +from .request_handlers import serialize_batch_body, _get_batch_request_delimiter from .policies import ( StorageHeadersPolicy, StorageContentValidation, @@ -61,13 +56,12 @@ _LOGGER = logging.getLogger(__name__) _SERVICE_PARAMS = { - "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, - "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, - "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, - "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, + "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, + "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, + "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, + "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, } - class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes def __init__( self, @@ -262,33 +256,52 @@ def _create_pipeline(self, credential, **kwargs): return config, Pipeline(config.transport, policies=policies) def _batch_send( - self, *reqs, # type: HttpRequest + self, + *reqs, # type: HttpRequest **kwargs ): """Given a series of request, do a Storage batch call. """ # Pop it here, so requests doesn't feel bad about additional kwarg raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + batch_id = str(uuid.uuid1()) + request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), + url='{}://{}/{}?{}comp=batch{}{}'.format( + self.scheme, + self.primary_hostname, + kwargs.pop('path', ""), + kwargs.pop('restype', ""), + kwargs.pop('sas', ""), + kwargs.pop('timeout', "") + ), headers={ - 'x-ms-version': self.api_version + 'x-ms-version': self.api_version, + "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) } ) + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + request.set_multipart_mixed( *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], + policies=policies, enforce_https=False ) + Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access + body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) + request.set_bytes_body(body) + + temp = request.multipart_mixed_info + request.multipart_mixed_info = None pipeline_response = self._pipeline.run( request, **kwargs ) response = pipeline_response.http_response + request.multipart_mixed_info = temp try: if response.status_code not in [202]: @@ -350,15 +363,15 @@ def parse_connection_str(conn_str, credential, service): conn_settings = [s.split("=", 1) for s in conn_str.split(";")] if any(len(tup) != 2 for tup in conn_settings): raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict(conn_settings) + conn_settings = dict((key.upper(), val) for key, val in conn_settings) endpoints = _SERVICE_PARAMS[service] primary = None secondary = None if not 
credential:
         try:
-            credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]}
+            credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]}
         except KeyError:
-            credential = conn_settings.get("SharedAccessSignature")
+            credential = conn_settings.get("SHAREDACCESSSIGNATURE")
     if endpoints["primary"] in conn_settings:
         primary = conn_settings[endpoints["primary"]]
         if endpoints["secondary"] in conn_settings:
@@ -368,13 +381,13 @@ def parse_connection_str(conn_str, credential, service):
             raise ValueError("Connection string specifies only secondary endpoint.")
         try:
             primary = "{}://{}.{}.{}".format(
-                conn_settings["DefaultEndpointsProtocol"],
-                conn_settings["AccountName"],
+                conn_settings["DEFAULTENDPOINTSPROTOCOL"],
+                conn_settings["ACCOUNTNAME"],
                 service,
-                conn_settings["EndpointSuffix"],
+                conn_settings["ENDPOINTSUFFIX"],
             )
             secondary = "{}-secondary.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings["EndpointSuffix"]
+                conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"]
             )
         except KeyError:
             pass
@@ -382,7 +395,7 @@ def parse_connection_str(conn_str, credential, service):
     if not primary:
         try:
             primary = "https://{}.{}.{}".format(
-                conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE)
+                conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE)
             )
         except KeyError:
             raise ValueError("Connection string missing required connection details.")
@@ -411,6 +424,9 @@ def create_configuration(**kwargs):
     # Page blob uploads
     config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024)
 
+    # Datalake file uploads
+    config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1)
+
     # Blob downloads
     config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024)
     config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/request_handlers.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/request_handlers.py
index 4f15b65..37354d7 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/request_handlers.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/request_handlers.py
@@ -20,6 +20,10 @@
 
 _LOGGER = logging.getLogger(__name__)
 
+_REQUEST_DELIMITER_PREFIX = "batch_"
+_HTTP1_1_IDENTIFIER = "HTTP/1.1"
+_HTTP_LINE_ENDING = "\r\n"
+
 
 def serialize_iso(attr):
     """Serialize Datetime object into ISO-8601 formatted string.
@@ -145,3 +149,125 @@ def add_metadata_headers(metadata=None):
         for key, value in metadata.items():
             headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value
     return headers
+
+
+def serialize_batch_body(requests, batch_id):
+    """
+    --<delimiter>
+    <subrequest>
+    --<delimiter>
+    (repeated as needed)
+    --<delimiter>--
+
+    Serializes the requests in this batch to a single HTTP mixed/multipart body.
+
+    :param list[~azure.core.pipeline.transport.HttpRequest] requests:
+        a list of sub-requests for the batch request
+    :param str batch_id:
+        to be embedded in batch sub-request delimiter
+    :return: The body bytes for this batch.
+    """
+
+    if requests is None or len(requests) == 0:
+        raise ValueError('Please provide sub-request(s) for this batch request')
+
+    delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8')
+    newline_bytes = _HTTP_LINE_ENDING.encode('utf-8')
+    batch_body = list()
+
+    content_index = 0
+    for request in requests:
+        request.headers.update({
+            "Content-ID": str(content_index),
+            "Content-Length": str(0)
+        })
+        batch_body.append(delimiter_bytes)
+        batch_body.append(_make_body_from_sub_request(request))
+        batch_body.append(newline_bytes)
+        content_index += 1
+
+    batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8'))
+    # final line of body MUST have \r\n at the end, or it will not be properly read by the service
+    batch_body.append(newline_bytes)
+
+    return bytes().join(batch_body)
+
+
+def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False):
+    """
+    Gets the delimiter used for this batch request's mixed/multipart HTTP format.
+
+    :param str batch_id:
+        Randomly generated id
+    :param bool is_prepend_dashes:
+        Whether to include the starting dashes. Used in the body, but not when defining the delimiter.
+    :param bool is_append_dashes:
+        Whether to include the ending dashes. Used in the body on the closing delimiter only.
+    :return: The delimiter, WITHOUT a trailing newline.
+    """
+
+    prepend_dashes = '--' if is_prepend_dashes else ''
+    append_dashes = '--' if is_append_dashes else ''
+
+    return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes
+
+
+def _make_body_from_sub_request(sub_request):
+    """
+    Content-Type: application/http
+    Content-ID: <sub-request-id>
+    Content-Transfer-Encoding: <value> (if present)
+
+    <verb> <path><query> HTTP/<version>
+    <header key>: <header value> (repeated as necessary)
+    Content-Length: <value>
+    (newline if content length > 0)
+    <body> (if content length > 0)
+
+    Serializes an http request.
+
+    :param ~azure.core.pipeline.transport.HttpRequest sub_request:
+        Request to serialize.
+    :return: The serialized sub-request in bytes
+    """
+
+    # put the sub-request's headers into a list for efficient str concatenation
+    sub_request_body = list()
+
+    # get headers for ease of manipulation; remove headers as they are used
+    headers = sub_request.headers
+
+    # append opening headers
+    sub_request_body.append("Content-Type: application/http")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-ID: ")
+    sub_request_body.append(headers.pop("Content-ID", ""))
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-Transfer-Encoding: binary")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append HTTP verb and path and query and HTTP version
+    sub_request_body.append(sub_request.method)
+    sub_request_body.append(' ')
+    sub_request_body.append(sub_request.url)
+    sub_request_body.append(' ')
+    sub_request_body.append(_HTTP1_1_IDENTIFIER)
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append remaining headers (this will set the Content-Length, as it was set on `sub-request`)
+    for header_name, header_value in headers.items():
+        if header_value is not None:
+            sub_request_body.append(header_name)
+            sub_request_body.append(": ")
+            sub_request_body.append(header_value)
+            sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    return ''.join(sub_request_body).encode()
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads.py
index abf3fb2..1b619df 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads.py
@@ -77,13 +77,13 @@ def upload_data_chunks(
         validate_content=validate_content,
         **kwargs)
     if parallel:
-        executor = futures.ThreadPoolExecutor(max_concurrency)
-        upload_tasks = uploader.get_chunk_streams()
-        running_futures = [
-            executor.submit(with_current_context(uploader.process_chunk), u)
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_chunk_streams()
+            running_futures = [
+                executor.submit(with_current_context(uploader.process_chunk), u)
+                for u in islice(upload_tasks, 0, max_concurrency)
+            ]
+            range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
     else:
         range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
     if any(range_ids):
@@ -112,16 +112,18 @@ def upload_substream_blocks(
         **kwargs)
 
     if parallel:
-        executor = futures.ThreadPoolExecutor(max_concurrency)
-        upload_tasks = uploader.get_substream_blocks()
-        running_futures = [
-            executor.submit(with_current_context(uploader.process_substream_block), u)
-            for u in islice(upload_tasks, 0, max_concurrency)
-        ]
-        range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_substream_blocks()
+            running_futures = [
+
executor.submit(with_current_context(uploader.process_substream_block), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) else: range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - return sorted(range_ids) + if any(range_ids): + return sorted(range_ids) + return [] class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes @@ -221,16 +223,16 @@ def get_substream_blocks(self): for i in range(blocks): index = i * self.chunk_size length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + yield index, SubStream(self.stream, index, length, lock) def process_substream_block(self, block_data): return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - def _upload_substream_block(self, block_id, block_stream): + def _upload_substream_block(self, index, block_stream): raise NotImplementedError("Must be implemented by child class.") - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) + def _upload_substream_block_with_progress(self, index, block_stream): + range_id = self._upload_substream_block(index, block_stream) self._update_progress(len(block_stream)) return range_id @@ -260,8 +262,9 @@ def _upload_chunk(self, chunk_offset, chunk_data): ) return index, block_id - def _upload_substream_block(self, block_id, block_stream): + def _upload_substream_block(self, index, block_stream): try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) self.service.stage_block( block_id, len(block_stream), @@ -289,7 +292,7 @@ def _upload_chunk(self, chunk_offset, chunk_data): content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) computed_md5 = None self.response_headers = self.service.upload_pages( - chunk_data, + body=chunk_data, content_length=len(chunk_data), transactional_content_md5=computed_md5, range=content_range, @@ -302,6 +305,9 @@ def _upload_chunk(self, chunk_offset, chunk_data): if not self.parallel and self.request_options.get('modified_access_conditions'): self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + def _upload_substream_block(self, index, block_stream): + pass + class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method @@ -312,7 +318,7 @@ def __init__(self, *args, **kwargs): def _upload_chunk(self, chunk_offset, chunk_data): if self.current_length is None: self.response_headers = self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, @@ -324,7 +330,7 @@ def _upload_chunk(self, chunk_offset, chunk_data): self.request_options['append_position_access_conditions'].append_position = \ self.current_length + chunk_offset self.response_headers = self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, @@ -332,6 +338,41 @@ def _upload_chunk(self, chunk_offset, chunk_data): **self.request_options ) + def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + 
+        self.response_headers = self.service.append_data(
+            body=chunk_data,
+            position=chunk_offset,
+            content_length=len(chunk_data),
+            cls=return_response_headers,
+            data_stream_total=self.total_size,
+            upload_stream_current=self.progress_total,
+            **self.request_options
+        )
+
+        if not self.parallel and self.request_options.get('modified_access_conditions'):
+            self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
+
+    def _upload_substream_block(self, index, block_stream):
+        try:
+            self.service.append_data(
+                body=block_stream,
+                position=index,
+                content_length=len(block_stream),
+                cls=return_response_headers,
+                data_stream_total=self.total_size,
+                upload_stream_current=self.progress_total,
+                **self.request_options
+            )
+        finally:
+            block_stream.close()
+

 class FileChunkUploader(_ChunkUploader):  # pylint: disable=abstract-method

@@ -348,6 +389,10 @@ def _upload_chunk(self, chunk_offset, chunk_data):
         )
         return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response

+    # TODO: Implement this method.
+    def _upload_substream_block(self, index, block_stream):
+        pass
+

 class SubStream(IOBase):

@@ -432,6 +477,13 @@ def read(self, size=None):
                     raise IOError("Stream failed to seek to the desired location.")
                 buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
             else:
+                absolute_position = self._stream_begin_index + self._position
+                # It's possible that there was a connection problem during data transfer,
+                # so when we retry we don't want to read from the current position of the
+                # wrapped stream; instead, we should seek to the position we want to read from.
+                if self._wrapped_stream.tell() != absolute_position:
+                    self._wrapped_stream.seek(absolute_position, SEEK_SET)
+
                 buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)

         if buffer_from_stream:
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads_async.py
index f6a8725..5ed192b 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads_async.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_shared/uploads_async.py
@@ -124,7 +124,9 @@ async def upload_substream_blocks(
         range_ids = []
         for block in uploader.get_substream_blocks():
             range_ids.append(await uploader.process_substream_block(block))
-    return sorted(range_ids)
+    if any(range_ids):
+        return sorted(range_ids)
+    return []


 class _ChunkUploader(object):  # pylint: disable=too-many-instance-attributes
@@ -224,16 +226,16 @@ def get_substream_blocks(self):
         for i in range(blocks):
             index = i * self.chunk_size
             length = last_block_size if i == blocks - 1 else self.chunk_size
-            yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock))
+            yield index, SubStream(self.stream, index, length, lock)

     async def process_substream_block(self, block_data):
         return await self._upload_substream_block_with_progress(block_data[0], block_data[1])

-    async def _upload_substream_block(self, block_id, block_stream):
+    async def _upload_substream_block(self, index, block_stream):
         raise NotImplementedError("Must be implemented by child class.")

-    async def _upload_substream_block_with_progress(self, block_id, block_stream):
-        range_id = await self._upload_substream_block(block_id, block_stream)
+    async def _upload_substream_block_with_progress(self, index, block_stream):
+        range_id = await self._upload_substream_block(index, block_stream)
         await self._update_progress(len(block_stream))
         return range_id

@@ -256,14 +258,15 @@ async def 
_upload_chunk(self, chunk_offset, chunk_data): await self.service.stage_block( block_id, len(chunk_data), - chunk_data, + body=chunk_data, data_stream_total=self.total_size, upload_stream_current=self.progress_total, **self.request_options) return index, block_id - async def _upload_substream_block(self, block_id, block_stream): + async def _upload_substream_block(self, index, block_stream): try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) await self.service.stage_block( block_id, len(block_stream), @@ -293,7 +296,7 @@ async def _upload_chunk(self, chunk_offset, chunk_data): content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) computed_md5 = None self.response_headers = await self.service.upload_pages( - chunk_data, + body=chunk_data, content_length=len(chunk_data), transactional_content_md5=computed_md5, range=content_range, @@ -305,6 +308,9 @@ async def _upload_chunk(self, chunk_offset, chunk_data): if not self.parallel and self.request_options.get('modified_access_conditions'): self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + async def _upload_substream_block(self, index, block_stream): + pass + class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method @@ -315,7 +321,7 @@ def __init__(self, *args, **kwargs): async def _upload_chunk(self, chunk_offset, chunk_data): if self.current_length is None: self.response_headers = await self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, @@ -326,13 +332,47 @@ async def _upload_chunk(self, chunk_offset, chunk_data): self.request_options['append_position_access_conditions'].append_position = \ self.current_length + chunk_offset self.response_headers = await self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, upload_stream_current=self.progress_total, **self.request_options) + async def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + self.response_headers = await self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + async def _upload_substream_block(self, index, block_stream): + try: + await self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method @@ -349,3 +389,7 @@ async def _upload_chunk(self, chunk_offset, chunk_data): ) range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) return range_id, response + + # TODO: Implement this method. 
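+    # The stub below only satisfies the abstract interface declared on
+    # _ChunkUploader; share files are written range-by-range via _upload_chunk,
+    # so there is nothing to upload per substream block yet.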
+    async def _upload_substream_block(self, index, block_stream):
+        pass
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/_version.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/_version.py
index 37eadd1..d731da5 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/_version.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/_version.py
@@ -4,4 +4,4 @@
 # license information.
 # --------------------------------------------------------------------------

-VERSION = "12.4.1"
+VERSION = "12.5.0"
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_download_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_download_async.py
index c0db16d..b046fc0 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_download_async.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_download_async.py
@@ -10,6 +10,7 @@ from itertools import islice
 import warnings
+from typing import AsyncIterator
 from azure.core.exceptions import HttpResponseError
 from .._shared.encryption import decrypt_blob
 from .._shared.request_handlers import validate_and_format_range_headers
@@ -101,10 +102,11 @@ async def _download_chunk(self, chunk_start, chunk_end):


 class _AsyncChunkIterator(object):
-    """Async iterator for chunks in file download stream."""
+    """Async iterator for chunks in a file download stream."""

-    def __init__(self, size, content, downloader):
+    def __init__(self, size, content, downloader, chunk_size):
         self.size = size
+        self._chunk_size = chunk_size
         self._current_content = content
         self._iter_downloader = downloader
         self._iter_chunks = None
@@ -124,21 +126,35 @@ async def __anext__(self):
         if self._complete:
             raise StopAsyncIteration("Download complete")
         if not self._iter_downloader:
-            # If no iterator was supplied, the download completed with
-            # the initial GET, so we just return that data
+            # cut the data obtained from the initial GET into chunks
+            if len(self._current_content) > self._chunk_size:
+                return self._get_chunk_data()
             self._complete = True
             return self._current_content

         if not self._iter_chunks:
             self._iter_chunks = self._iter_downloader.get_chunk_offsets()
-        else:
-            try:
-                chunk = next(self._iter_chunks)
-            except StopIteration:
-                raise StopAsyncIteration("Download complete")
-            self._current_content = await self._iter_downloader.yield_chunk(chunk)
-        return self._current_content
+        # initial GET result still has more than _chunk_size bytes of data
+        if len(self._current_content) >= self._chunk_size:
+            return self._get_chunk_data()
+
+        try:
+            chunk = next(self._iter_chunks)
+            self._current_content += await self._iter_downloader.yield_chunk(chunk)
+        except StopIteration:
+            self._complete = True
+            # it's likely that there is some data left in self._current_content
+            if self._current_content:
+                return self._current_content
+            raise StopAsyncIteration("Download complete")
+
+        return self._get_chunk_data()
+
+    def _get_chunk_data(self):
+        chunk_data = self._current_content[: self._chunk_size]
+        self._current_content = self._current_content[self._chunk_size:]
+        return chunk_data


 class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
@@ -304,9 +320,10 @@ async def _initial_request(self):
         return response

     def chunks(self):
+        # type: () -> AsyncIterator[bytes]
         """Iterate over chunks in the download stream.
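
+        Chunks are normalized to ``max_chunk_get_size``: data left over from the
+        initial GET is buffered and topped up from subsequent range GETs, so every
+        chunk yielded (except possibly the last) has the same size.
+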
-        :rtype: Iterable[bytes]
+        :rtype: AsyncIterator[bytes]
         """
         if self.size == 0 or self._download_complete:
             iter_downloader = None
@@ -331,7 +348,9 @@ def chunks(self):
         return _AsyncChunkIterator(
             size=self.size,
             content=self._current_content,
-            downloader=iter_downloader)
+            downloader=iter_downloader,
+            chunk_size=self._config.max_chunk_get_size
+        )

     async def readall(self):
         """Download the contents of this file.
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_file_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_file_client_async.py
index ef8e770..6c21d22 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_file_client_async.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_file_client_async.py
@@ -561,8 +561,10 @@ async def download_file(
             length=None,  # type: Optional[int]
             **kwargs
         ):
-        # type: (...) -> Iterable[bytes]
-        """Downloads a file to a stream with automatic chunking.
+        # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+        """Downloads a file to a StorageStreamDownloader. Use readall() to read all
+        the content, or readinto() to download the file into a stream. chunks()
+        returns an async iterator over the content in chunks.

         :param int offset:
             Start of byte range to use for downloading a section of the file.
@@ -590,7 +592,8 @@ async def download_file(
         :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
         :keyword int timeout:
             The timeout parameter is expressed in seconds.
-        :returns: A iterable data generator (stream)
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.fileshare.aio.StorageStreamDownloader

         .. admonition:: Example:
diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_lease_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_lease_async.py
index 82c1550..0d99845 100644
--- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_lease_async.py
+++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_lease_async.py
@@ -5,7 +5,7 @@
 # pylint: disable=invalid-overridden-method
 from typing import (  # pylint: disable=unused-import
-    Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
+    Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
     TypeVar, TYPE_CHECKING
 )
@@ -25,7 +25,7 @@ class ShareLeaseClient(LeaseClientBase):
     """Creates a new ShareLeaseClient.

-    This client provides lease operations on a ShareFileClient.
+    This client provides lease operations on a ShareClient or ShareFileClient.

     :ivar str id:
         The ID of the lease currently being maintained. This will be `None` if no
@@ -38,8 +38,9 @@
         This will be `None` if no lease has yet been acquired or modified.
     :param client:
-        The client of the file to lease.
-    :type client: ~azure.storage.fileshare.ShareFileClient
+        The client of the file or share to lease.
+    :type client: ~azure.storage.fileshare.ShareFileClient or
+        ~azure.storage.fileshare.ShareClient
     :param str lease_id:
         A string representing the lease ID of an existing lease. This value does not
         need to be specified in order to acquire a new lease, or break one.
@@ -61,14 +62,20 @@ async def __aexit__(self, *args):
     async def acquire(self, **kwargs):
         # type: (**Any) -> None
         """Requests a new lease. This operation establishes and manages a lock on a
-        file for write and delete operations.
If the file does not have an active lease, - the File service creates a lease on the file. If the file has an active lease, + file or share for write and delete operations. If the file or share does not have an active lease, + the File or Share service creates a lease on the file or share. If the file has an active lease, you can only request a new lease using the active lease ID. - If the file does not have an active lease, the File service creates a + If the file or share does not have an active lease, the File or Share service creates a lease on the file and returns a new lease ID. + :keyword int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be + between 15 and 60 seconds. A share lease duration cannot be changed + using renew or change. Default is -1 (infinite share lease). + :keyword int timeout: The timeout parameter is expressed in seconds. :rtype: None @@ -90,7 +97,7 @@ async def acquire(self, **kwargs): self.etag = response.get('etag') # type: str @distributed_trace_async - async def _renew(self, **kwargs): + async def renew(self, **kwargs): # type: (Any) -> None """Renews the share lease. @@ -125,8 +132,8 @@ async def _renew(self, **kwargs): async def release(self, **kwargs): # type: (Any) -> None """Releases the lease. The lease may be released if the lease ID specified on the request matches - that associated with the file. Releasing the lease allows another client to immediately acquire - the lease for the file as soon as the release is complete. + that associated with the share or file. Releasing the lease allows another client to immediately acquire + the lease for the share or file as soon as the release is complete. :keyword int timeout: The timeout parameter is expressed in seconds. @@ -153,7 +160,7 @@ async def change(self, proposed_lease_id, **kwargs): a new lease ID in x-ms-proposed-lease-id. :param str proposed_lease_id: - Proposed lease ID, in a GUID string format. The File service raises an error + Proposed lease ID, in a GUID string format. The File or Share service raises an error (Invalid request) if the proposed lease ID is not in the correct format. :keyword int timeout: The timeout parameter is expressed in seconds. @@ -177,7 +184,7 @@ async def change(self, proposed_lease_id, **kwargs): @distributed_trace_async async def break_lease(self, **kwargs): # type: (Any) -> int - """Force breaks the lease if the file has an active lease. Any authorized request can break the lease; + """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; the request is not required to specify a matching lease ID. An infinite lease breaks immediately. Once a lease is broken, it cannot be changed. Any authorized request can break the lease; @@ -185,6 +192,19 @@ async def break_lease(self, **kwargs): When a lease is successfully broken, the response indicates the interval in seconds until a new lease can be acquired. + :keyword int lease_break_period: + This is the proposed duration of seconds that the share lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the share lease. If longer, the time remaining on the share lease is used. + A new share lease will not be available before the break period has + expired, but the share lease may be held for longer than the break + period. 
If this header does not appear with a break + operation, a fixed-duration share lease breaks after the remaining share lease + period elapses, and an infinite share lease breaks immediately. + + .. versionadded:: 12.5.0 + :keyword int timeout: The timeout parameter is expressed in seconds. :return: Approximate time remaining in the lease period, in seconds. diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_share_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_share_client_async.py index 5ceae13..05fc93b 100644 --- a/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_share_client_async.py +++ b/azure/multiapi/storagev2/fileshare/v2020_04_08/aio/_share_client_async.py @@ -24,7 +24,7 @@ SignedIdentifier, DeleteSnapshotsOptionType) from .._deserialize import deserialize_share_properties, deserialize_permission -from .._serialize import get_api_version +from .._serialize import get_api_version, get_access_conditions from .._share_client import ShareClient as ShareClientBase from ._directory_client_async import ShareDirectoryClient from ._file_client_async import ShareFileClient @@ -130,21 +130,21 @@ def get_file_client(self, file_path): _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) @distributed_trace_async() - async def _acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): - # type: (int, Optional[str], **Any) -> ShareLeaseClient + async def acquire_lease(self, **kwargs): + # type: (**Any) -> ShareLeaseClient """Requests a new lease. If the share does not have an active lease, the Share Service creates a lease on the share and returns a new lease. - .. versionadded:: 12.6.0 + .. versionadded:: 12.5.0 - :param int lease_duration: + :keyword int lease_duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. Default is -1 (infinite lease). - :param str lease_id: + :keyword str lease_id: Proposed lease ID, in a GUID string format. The Share Service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. @@ -162,7 +162,8 @@ async def _acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): :dedent: 8 :caption: Acquiring a lease on a share. """ - kwargs['lease_duration'] = lease_duration + kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) + lease_id = kwargs.pop('lease_id', None) lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore await lease.acquire(**kwargs) return lease @@ -182,7 +183,7 @@ async def create_share(self, **kwargs): Possible values: 'TransactionOptimized', 'Hot', 'Cool' :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier - .. versionadded:: 12.6.0 + .. versionadded:: 12.4.0 :keyword int timeout: The timeout parameter is expressed in seconds. @@ -290,6 +291,13 @@ async def delete_share( Indicates if snapshots are to be deleted. :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + .. admonition:: Example: .. literalinclude:: ../samples/file_samples_share_async.py @@ -299,6 +307,7 @@ async def delete_share( :dedent: 16 :caption: Deletes the share and any snapshots. 
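+
+        When the share holds an active lease, pass the lease acquired from
+        ``acquire_lease`` (or its ID) as the ``lease`` keyword; without it the
+        service rejects the request.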
""" + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) delete_include = None if delete_snapshots: @@ -308,6 +317,7 @@ async def delete_share( timeout=timeout, sharesnapshot=self.snapshot, delete_snapshots=delete_include, + lease_access_conditions=access_conditions, **kwargs) except HttpResponseError as error: process_storage_error(error) @@ -321,6 +331,13 @@ async def get_share_properties(self, **kwargs): :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :returns: The share properties. :rtype: ~azure.storage.fileshare.ShareProperties @@ -333,12 +350,14 @@ async def get_share_properties(self, **kwargs): :dedent: 16 :caption: Gets the share properties. """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) try: props = await self._client.share.get_properties( timeout=timeout, sharesnapshot=self.snapshot, cls=deserialize_share_properties, + lease_access_conditions=access_conditions, **kwargs) except HttpResponseError as error: process_storage_error(error) @@ -356,6 +375,13 @@ async def set_share_quota(self, quota, **kwargs): Must be greater than 0, and less than or equal to 5TB. :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :returns: Share-updated property dict (Etag and last modified). :rtype: dict(str, Any) @@ -368,6 +394,7 @@ async def set_share_quota(self, quota, **kwargs): :dedent: 16 :caption: Sets the share quota. """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) try: return await self._client.share.set_properties( # type: ignore @@ -375,6 +402,7 @@ async def set_share_quota(self, quota, **kwargs): quota=quota, access_tier=None, cls=return_response_headers, + lease_access_conditions=access_conditions, **kwargs) except HttpResponseError as error: process_storage_error(error) @@ -383,7 +411,7 @@ async def set_share_properties(self, **kwargs): # type: (Any) -> Dict[str, Any] """Sets the share properties. - .. versionadded:: 12.6.0 + .. versionadded:: 12.3.0 :keyword access_tier: Specifies the access tier of the share. @@ -398,6 +426,9 @@ async def set_share_properties(self, **kwargs): Root squash to set on the share. Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash' :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. :returns: Share-updated property dict (Etag and last modified). :rtype: dict(str, Any) @@ -410,6 +441,7 @@ async def set_share_properties(self, **kwargs): :dedent: 16 :caption: Sets the share properties. 
""" + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) access_tier = kwargs.pop('access_tier', None) quota = kwargs.pop('quota', None) @@ -422,6 +454,7 @@ async def set_share_properties(self, **kwargs): quota=quota, access_tier=access_tier, root_squash=root_squash, + lease_access_conditions=access_conditions, cls=return_response_headers, **kwargs) except HttpResponseError as error: @@ -441,6 +474,13 @@ async def set_share_metadata(self, metadata, **kwargs): :type metadata: dict(str, str) :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :returns: Share-updated property dict (Etag and last modified). :rtype: dict(str, Any) @@ -453,6 +493,7 @@ async def set_share_metadata(self, metadata, **kwargs): :dedent: 16 :caption: Sets the share metadata. """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) headers = kwargs.pop('headers', {}) headers.update(add_metadata_headers(metadata)) @@ -461,6 +502,7 @@ async def set_share_metadata(self, metadata, **kwargs): timeout=timeout, cls=return_response_headers, headers=headers, + lease_access_conditions=access_conditions, **kwargs) except HttpResponseError as error: process_storage_error(error) @@ -473,14 +515,23 @@ async def get_share_access_policy(self, **kwargs): :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :returns: Access policy information in a dict. :rtype: dict[str, Any] """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) try: response, identifiers = await self._client.share.get_access_policy( timeout=timeout, cls=return_headers_and_deserialized, + lease_access_conditions=access_conditions, **kwargs) except HttpResponseError as error: process_storage_error(error) @@ -503,9 +554,17 @@ async def set_share_access_policy(self, signed_identifiers, **kwargs): :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :returns: Share-updated property dict (Etag and last modified). :rtype: dict(str, Any) """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) if len(signed_identifiers) > 5: raise ValueError( @@ -524,6 +583,7 @@ async def set_share_access_policy(self, signed_identifiers, **kwargs): share_acl=signed_identifiers or None, timeout=timeout, cls=return_response_headers, + lease_access_conditions=access_conditions, **kwargs) except HttpResponseError as error: process_storage_error(error) @@ -538,13 +598,22 @@ async def get_share_stats(self, **kwargs): :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. 
Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + :return: The approximate size of the data (in bytes) stored on the share. :rtype: int """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) timeout = kwargs.pop('timeout', None) try: stats = await self._client.share.get_statistics( timeout=timeout, + lease_access_conditions=access_conditions, **kwargs) return stats.share_usage_bytes # type: ignore except HttpResponseError as error: diff --git a/azure/multiapi/storagev2/fileshare/v2020_04_08/py.typed b/azure/multiapi/storagev2/fileshare/v2020_04_08/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/__init__.py new file mode 100644 index 0000000..af67e01 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/__init__.py @@ -0,0 +1,82 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from ._version import VERSION +from ._file_client import ShareFileClient +from ._directory_client import ShareDirectoryClient +from ._share_client import ShareClient +from ._share_service_client import ShareServiceClient +from ._lease import ShareLeaseClient +from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas +from ._shared.policies import ExponentialRetry, LinearRetry +from ._shared.models import ( + LocationMode, + ResourceTypes, + AccountSasPermissions, + StorageErrorCode) +from ._models import ( + ShareProperties, + DirectoryProperties, + Handle, + FileProperties, + Metrics, + RetentionPolicy, + CorsRule, + ShareSmbSettings, + SmbMultichannel, + ShareProtocolSettings, + ShareProtocols, + AccessPolicy, + FileSasPermissions, + ShareSasPermissions, + ContentSettings, + NTFSAttributes) +from ._generated.models import ( + HandleItem, + ShareAccessTier +) +from ._generated.models import ( + ShareRootSquash +) + +__version__ = VERSION + + +__all__ = [ + 'ShareFileClient', + 'ShareDirectoryClient', + 'ShareClient', + 'ShareServiceClient', + 'ShareLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'LocationMode', + 'ResourceTypes', + 'AccountSasPermissions', + 'StorageErrorCode', + 'Metrics', + 'RetentionPolicy', + 'CorsRule', + 'ShareSmbSettings', + 'ShareAccessTier', + 'SmbMultichannel', + 'ShareProtocolSettings', + 'AccessPolicy', + 'FileSasPermissions', + 'ShareSasPermissions', + 'ShareProtocols', + 'ShareProperties', + 'DirectoryProperties', + 'FileProperties', + 'ContentSettings', + 'Handle', + 'NTFSAttributes', + 'HandleItem', + 'ShareRootSquash', + 'generate_account_sas', + 'generate_share_sas', + 'generate_file_sas' +] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_deserialize.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_deserialize.py new file mode 100644 index 0000000..6839469 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_deserialize.py @@ -0,0 +1,83 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use +from typing import ( # pylint: disable=unused-import + Tuple, Dict, List, + TYPE_CHECKING +) + +from ._models import ShareProperties, DirectoryProperties, FileProperties +from ._shared.response_handlers import deserialize_metadata +from ._generated.models import ShareFileRangeList + + +def deserialize_share_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + share_properties = ShareProperties( + metadata=metadata, + **headers + ) + return share_properties + + +def deserialize_directory_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + directory_properties = DirectoryProperties( + metadata=metadata, + **headers + ) + return directory_properties + + +def deserialize_file_properties(response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + file_properties = FileProperties( + metadata=metadata, + **headers + ) + if 'Content-Range' in headers: + if 'x-ms-content-md5' in headers: + file_properties.content_settings.content_md5 = headers['x-ms-content-md5'] + else: + file_properties.content_settings.content_md5 = None + return file_properties + + +def deserialize_file_stream(response, obj, headers): + file_properties = deserialize_file_properties(response, obj, headers) + obj.properties = file_properties + return response.http_response.location_mode, obj + + +def deserialize_permission(response, obj, headers): # pylint: disable=unused-argument + ''' + Extracts out file permission + ''' + + return obj.permission + + +def deserialize_permission_key(response, obj, headers): # pylint: disable=unused-argument + ''' + Extracts out file permission key + ''' + + if response is None or headers is None: + return None + return headers.get('x-ms-file-permission-key', None) + + +def get_file_ranges_result(ranges): + # type: (ShareFileRangeList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + file_ranges = [] # type: ignore + clear_ranges = [] # type: List + if ranges.ranges: + file_ranges = [ + {'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] # type: ignore + if ranges.clear_ranges: + clear_ranges = [ + {'start': clear_range.start, 'end': clear_range.end} for clear_range in ranges.clear_ranges] + return file_ranges, clear_ranges diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_directory_client.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_directory_client.py new file mode 100644 index 0000000..1c9d5f1 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_directory_client.py @@ -0,0 +1,726 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import functools +import time +from typing import ( # pylint: disable=unused-import + Optional, Union, Any, Dict, TYPE_CHECKING +) + + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six +from azure.core.exceptions import HttpResponseError +from azure.core.paging import ItemPaged +from azure.core.pipeline import Pipeline +from azure.core.tracing.decorator import distributed_trace + +from ._generated import AzureFileStorage +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._shared.parser import _str +from ._parser import _get_file_permission, _datetime_to_str +from ._deserialize import deserialize_directory_properties +from ._serialize import get_api_version +from ._file_client import ShareFileClient +from ._models import DirectoryPropertiesPaged, HandlesPaged, NTFSAttributes # pylint: disable=unused-import + +if TYPE_CHECKING: + from datetime import datetime + from ._models import ShareProperties, DirectoryProperties, ContentSettings + from ._generated.models import HandleItem + + +class ShareDirectoryClient(StorageAccountHostsMixin): + """A client to interact with a specific directory, although it may not yet exist. + + For operations relating to a specific subdirectory or file in this share, the clients for those + entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the directory, + use the :func:`from_directory_url` classmethod. + :param share_name: + The name of the share for the directory. + :type share_name: str + :param str directory_path: + The directory path for the directory with which to interact. + If specified, this value will override a directory value specified in the directory URL. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + directory_path, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Optional[Any] + ): + # type: (...) 
-> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not share_name: + raise ValueError("Please specify a share name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + if hasattr(credential, 'get_token'): + raise ValueError("Token credentials not supported by the File service.") + + path_snapshot, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + self.share_name = share_name + self.directory_path = directory_path + + self._query_str, credential = self._format_query_string( + sas_token, credential, share_snapshot=self.snapshot) + super(ShareDirectoryClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + + @classmethod + def from_directory_url(cls, directory_url, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Optional[Any] + ): + # type: (...) -> ShareDirectoryClient + """Create a ShareDirectoryClient from a directory url. + + :param str directory_url: + The full URI to the directory. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials or an account + shared access key. + :returns: A directory client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + try: + if not directory_url.lower().startswith('http'): + directory_url = "https://" + directory_url + except AttributeError: + raise ValueError("Directory URL must be a string.") + parsed_url = urlparse(directory_url.rstrip('/')) + if not parsed_url.path and not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(directory_url)) + account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query + path_snapshot, _ = parse_query(parsed_url.query) + + share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/') + share_name = unquote(share_name) + + directory_path = path_dir + snapshot = snapshot or path_snapshot + + return cls( + account_url=account_url, share_name=share_name, directory_path=directory_path, + credential=credential, **kwargs) + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. 
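+
+        The result has the form
+        ``{scheme}://{hostname}/{share_name}[/{directory_path}]{query}``.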
+ """ + share_name = self.share_name + if isinstance(share_name, six.text_type): + share_name = share_name.encode('UTF-8') + directory_path = "" + if self.directory_path: + directory_path = "/" + quote(self.directory_path, safe='~') + return "{}://{}/{}{}{}".format( + self.scheme, + hostname, + quote(share_name), + directory_path, + self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + share_name, # type: str + directory_path, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> ShareDirectoryClient + """Create ShareDirectoryClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param share_name: The name of the share. + :type share_name: str + :param str directory_path: + The directory path. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials or an account + shared access key. + :returns: A directory client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs) + + def get_file_client(self, file_name, **kwargs): + # type: (str, Any) -> ShareFileClient + """Get a client to interact with a specific file. + + The file need not already exist. + + :param file_name: + The name of the file. + :returns: A File Client. + :rtype: ~azure.storage.fileshare.ShareFileClient + """ + if self.directory_path: + file_name = self.directory_path.rstrip('/') + "/" + file_name + + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareFileClient( + self.url, file_path=file_name, share_name=self.share_name, napshot=self.snapshot, + credential=self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, **kwargs) + + def get_subdirectory_client(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Get a client to interact with a specific subdirectory. + + The subdirectory need not already exist. + + :param str directory_name: + The name of the subdirectory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START get_subdirectory_client] + :end-before: [END get_subdirectory_client] + :language: python + :dedent: 12 + :caption: Gets the subdirectory client. 
+ """ + directory_path = self.directory_path.rstrip('/') + "/" + directory_name + + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, + _location_mode=self._location_mode, **kwargs) + + @distributed_trace + def create_directory(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new directory under the directory referenced by the client. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the directory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START create_directory] + :end-before: [END create_directory] + :language: python + :dedent: 12 + :caption: Creates a directory. + """ + timeout = kwargs.pop('timeout', None) + metadata = kwargs.pop('metadata', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return self._client.directory.create( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def delete_directory(self, **kwargs): + # type: (**Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START delete_directory] + :end-before: [END delete_directory] + :language: python + :dedent: 12 + :caption: Deletes a directory. + """ + timeout = kwargs.pop('timeout', None) + try: + self._client.directory.delete(timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files(self, name_starts_with=None, **kwargs): + # type: (Optional[str], **Any) -> ItemPaged + """Lists all the directories and files under the directory. + + :param str name_starts_with: + Filters the results to return only entities whose names + begin with the specified prefix. + :keyword list[str] include: + Include this parameter to specify one or more datasets to include in the response. + Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword bool include_extended_info: + If this is set to true, file id will be returned in listed results. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + :rtype: ~azure.core.paging.ItemPaged[DirectoryProperties and FileProperties] + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_directory.py + :start-after: [START lists_directory] + :end-before: [END lists_directory] + :language: python + :dedent: 12 + :caption: List directories and files. + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_files_and_directories_segment, + sharesnapshot=self.snapshot, + timeout=timeout, + **kwargs) + return ItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=DirectoryPropertiesPaged) + + @distributed_trace + def list_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> ItemPaged + """Lists opened handles on a directory or a file under the directory. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of HandleItem + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem] + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_handles, + sharesnapshot=self.snapshot, + timeout=timeout, + recursive=recursive, + **kwargs) + return ItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=HandlesPaged) + + @distributed_trace + def close_handle(self, handle, **kwargs): + # type: (Union[str, HandleItem], Any) -> Dict[str, int] + """Close an open file handle. + + :param handle: + A specific handle to close. + :type handle: str or ~azure.storage.fileshare.Handle + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + try: + handle_id = handle.id # type: ignore + except AttributeError: + handle_id = handle + if handle_id == '*': + raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") + try: + response = self._client.directory.force_close_handles( + handle_id, + marker=None, + recursive=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def close_all_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. 
+ :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = self._client.directory.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + recursive=recursive, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } + + @distributed_trace + def get_directory_properties(self, **kwargs): + # type: (Any) -> DirectoryProperties + """Returns all user-defined metadata and system properties for the + specified directory. The data returned does not include the directory's + list of files. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: DirectoryProperties + :rtype: ~azure.storage.fileshare.DirectoryProperties + """ + timeout = kwargs.pop('timeout', None) + try: + response = self._client.directory.get_properties( + timeout=timeout, + cls=deserialize_directory_properties, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return response # type: ignore + + @distributed_trace + def set_directory_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the directory. + + Each call to this operation replaces all existing metadata + attached to the directory. To remove all metadata from the directory, + call this operation with an empty metadata dict. + + :param metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return self._client.directory.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Union[str, datetime] + file_last_write_time="preserve", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the directory. + + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. + :type file_creation_time: str or datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. 
+ :type file_last_write_time: str or datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + file_permission = _get_file_permission(file_permission, permission_key, 'preserve') + try: + return self._client.directory.set_properties( # type: ignore + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def create_subdirectory( + self, directory_name, # type: str + **kwargs): + # type: (...) -> ShareDirectoryClient + """Creates a new subdirectory and returns a client to interact + with the subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword dict(str,str) metadata: + Name-value pairs associated with the subdirectory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START create_subdirectory] + :end-before: [END create_subdirectory] + :language: python + :dedent: 12 + :caption: Create a subdirectory. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) + return subdir # type: ignore + + @distributed_trace + def delete_subdirectory( + self, directory_name, # type: str + **kwargs + ): + # type: (...) -> None + """Deletes a subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START delete_subdirectory] + :end-before: [END delete_subdirectory] + :language: python + :dedent: 12 + :caption: Delete a subdirectory. + """ + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + subdir.delete_directory(timeout=timeout, **kwargs) + + @distributed_trace + def upload_file( + self, file_name, # type: str + data, # type: Any + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> ShareFileClient + """Creates a new file in the directory and returns a ShareFileClient + to interact with the file. + + :param str file_name: + The name of the file. 
+ :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str encoding: + Defaults to UTF-8. + :returns: ShareFileClient + :rtype: ~azure.storage.fileshare.ShareFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START upload_file_to_directory] + :end-before: [END upload_file_to_directory] + :language: python + :dedent: 12 + :caption: Upload a file to a directory. + """ + file_client = self.get_file_client(file_name) + file_client.upload_file( + data, + length=length, + **kwargs) + return file_client # type: ignore + + @distributed_trace + def delete_file( + self, file_name, # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> None + """Marks the specified file for deletion. The file is later + deleted during garbage collection. + + :param str file_name: + The name of the file to delete. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START delete_file_in_directory] + :end-before: [END delete_file_in_directory] + :language: python + :dedent: 12 + :caption: Delete a file in a directory. + """ + file_client = self.get_file_client(file_name) + file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_download.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_download.py new file mode 100644 index 0000000..a2db5aa --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_download.py @@ -0,0 +1,554 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+
+import sys
+import threading
+import warnings
+from io import BytesIO
+from typing import Iterator
+
+from azure.core.exceptions import HttpResponseError, ResourceModifiedError
+from azure.core.tracing.common import with_current_context
+from ._shared.encryption import decrypt_blob
+from ._shared.request_handlers import validate_and_format_range_headers
+from ._shared.response_handlers import process_storage_error, parse_length_from_content_range
+
+
+def process_range_and_offset(start_range, end_range, length, encryption):
+    start_offset, end_offset = 0, 0
+    if encryption.get("key") is not None or encryption.get("resolver") is not None:
+        if start_range is not None:
+            # Align the start of the range along a 16 byte block
+            start_offset = start_range % 16
+            start_range -= start_offset
+
+            # Include an extra 16 bytes for the IV if necessary
+            # Because of the previous offsetting, start_range will always
+            # be a multiple of 16.
+            if start_range > 0:
+                start_offset += 16
+                start_range -= 16
+
+        if length is not None:
+            # Align the end of the range along a 16 byte block
+            end_offset = 15 - (end_range % 16)
+            end_range += end_offset
+
+    return (start_range, end_range), (start_offset, end_offset)
+
+
+def process_content(data, start_offset, end_offset, encryption):
+    if data is None:
+        raise ValueError("Response cannot be None.")
+    try:
+        content = b"".join(list(data))
+    except Exception as error:
+        raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error)
+    # Decrypt only when there is content and encryption is configured
+    # (the "or" must be grouped, otherwise an empty body with a resolver
+    # set would still be passed to decrypt_blob).
+    if content and (encryption.get("key") is not None or encryption.get("resolver") is not None):
+        try:
+            return decrypt_blob(
+                encryption.get("required"),
+                encryption.get("key"),
+                encryption.get("resolver"),
+                content,
+                start_offset,
+                end_offset,
+                data.response.headers,
+            )
+        except Exception as error:
+            raise HttpResponseError(message="Decryption failed.", response=data.response, error=error)
+    return content
+
+
+class _ChunkDownloader(object):  # pylint: disable=too-many-instance-attributes
+    def __init__(
+        self,
+        client=None,
+        total_size=None,
+        chunk_size=None,
+        current_progress=None,
+        start_range=None,
+        end_range=None,
+        stream=None,
+        parallel=None,
+        validate_content=None,
+        encryption_options=None,
+        etag=None,
+        **kwargs
+    ):
+        self.client = client
+        self.etag = etag
+        # Information on the download range/chunk size
+        self.chunk_size = chunk_size
+        self.total_size = total_size
+        self.start_index = start_range
+        self.end_index = end_range
+
+        # The destination that we will write to
+        self.stream = stream
+        self.stream_lock = threading.Lock() if parallel else None
+        self.progress_lock = threading.Lock() if parallel else None
+
+        # For a parallel download, the stream is always seekable, so we note down the current position
+        # in order to seek to the right place when out-of-order chunks come in
+        self.stream_start = stream.tell() if parallel else None
+
+        # Download progress so far
+        self.progress_total = current_progress
+
+        # Encryption
+        self.encryption_options = encryption_options
+
+        # Parameters for each get operation
+        self.validate_content = validate_content
+        self.request_options = kwargs
+
+    def _calculate_range(self, chunk_start):
+        if chunk_start + self.chunk_size > self.end_index:
+            chunk_end = self.end_index
+        else:
+            chunk_end = chunk_start + self.chunk_size
+        return chunk_start, chunk_end
+
+    def get_chunk_offsets(self):
+        index = self.start_index
+        while index < self.end_index:
+            yield index
+            index += self.chunk_size
+
+    def process_chunk(self, chunk_start):
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        chunk_data = self._download_chunk(chunk_start, chunk_end - 1)
+        length = chunk_end - chunk_start
+        if length > 0:
+            self._write_to_stream(chunk_data, chunk_start)
+            self._update_progress(length)
+
+    def yield_chunk(self, chunk_start):
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        return self._download_chunk(chunk_start, chunk_end - 1)
+
+    def _update_progress(self, length):
+        if self.progress_lock:
+            with self.progress_lock:  # pylint: disable=not-context-manager
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+    def _write_to_stream(self, chunk_data, chunk_start):
+        if self.stream_lock:
+            with self.stream_lock:  # pylint: disable=not-context-manager
+                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+                self.stream.write(chunk_data)
+        else:
+            self.stream.write(chunk_data)
+
+    def _download_chunk(self, chunk_start, chunk_end):
+        download_range, offset = process_range_and_offset(
+            chunk_start, chunk_end, chunk_end, self.encryption_options
+        )
+        range_header, range_validation = validate_and_format_range_headers(
+            download_range[0], download_range[1], check_content_md5=self.validate_content
+        )
+
+        try:
+            _, response = self.client.download(
+                range=range_header,
+                range_get_content_md5=range_validation,
+                validate_content=self.validate_content,
+                data_stream_total=self.total_size,
+                download_stream_current=self.progress_total,
+                **self.request_options
+            )
+            if response.properties.etag != self.etag:
+                raise ResourceModifiedError(message="The file has been modified while downloading.")
+
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        chunk_data = process_content(response, offset[0], offset[1], self.encryption_options)
+        return chunk_data
+
+
+class _ChunkIterator(object):
+    """Iterator over the chunks in a file download stream."""
+
+    def __init__(self, size, content, downloader, chunk_size):
+        self.size = size
+        self._chunk_size = chunk_size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks = None
+        self._complete = (size == 0)
+
+    def __len__(self):
+        return self.size
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        """Iterate through responses."""
+        if self._complete:
+            raise StopIteration("Download complete")
+        if not self._iter_downloader:
+            # cut the data obtained from initial GET into chunks
+            if len(self._current_content) > self._chunk_size:
+                return self._get_chunk_data()
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+        # initial GET result still has more than _chunk_size bytes of data
+        if len(self._current_content) >= self._chunk_size:
+            return self._get_chunk_data()
+
+        try:
+            chunk = next(self._iter_chunks)
+            self._current_content += self._iter_downloader.yield_chunk(chunk)
+        except StopIteration as e:
+            self._complete = True
+            if self._current_content:
+                return self._current_content
+            raise e
+
+        return self._get_chunk_data()
+
+    next = __next__  # Python 2 compatibility.
+
+    def _get_chunk_data(self):
+        chunk_data = self._current_content[: self._chunk_size]
+        self._current_content = self._current_content[self._chunk_size:]
+        return chunk_data
+
+
+class StorageStreamDownloader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to download from Azure Storage.
+
+    :ivar str name:
+        The name of the file being downloaded.
+    :ivar str path:
+        The full path of the file.
+    :ivar str share:
+        The name of the share where the file is.
+    :ivar ~azure.storage.fileshare.FileProperties properties:
+        The properties of the file being downloaded. If only a range of the data is being
+        downloaded, this will be reflected in the properties.
+    :ivar int size:
+        The size of the total data in the stream. This will be the byte range if specified,
+        otherwise the total size of the file.
+    """
+
+    def __init__(
+        self,
+        client=None,
+        config=None,
+        start_range=None,
+        end_range=None,
+        validate_content=None,
+        encryption_options=None,
+        max_concurrency=1,
+        name=None,
+        path=None,
+        share=None,
+        encoding=None,
+        **kwargs
+    ):
+        self.name = name
+        self.path = path
+        self.share = share
+        self.properties = None
+        self.size = None
+
+        self._client = client
+        self._config = config
+        self._start_range = start_range
+        self._end_range = end_range
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        self._validate_content = validate_content
+        self._encryption_options = encryption_options or {}
+        self._request_options = kwargs
+        self._location_mode = None
+        self._download_complete = False
+        self._current_content = None
+        self._file_size = None
+        self._response = None
+        self._etag = None
+
+        # The service only provides transactional MD5s for chunks under 4MB.
+        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
+        # chunk so a transactional MD5 can be retrieved.
+        self._first_get_size = (
+            self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size
+        )
+        initial_request_start = self._start_range if self._start_range is not None else 0
+        if self._end_range is not None and self._end_range - self._start_range < self._first_get_size:
+            initial_request_end = self._end_range
+        else:
+            initial_request_end = initial_request_start + self._first_get_size - 1
+
+        self._initial_range, self._initial_offset = process_range_and_offset(
+            initial_request_start, initial_request_end, self._end_range, self._encryption_options
+        )
+
+        self._response = self._initial_request()
+        self.properties = self._response.properties
+        self.properties.name = self.name
+        self.properties.path = self.path
+        self.properties.share = self.share
+
+        # Set the content length to the download size instead of the size of
+        # the last range
+        self.properties.size = self.size
+
+        # Overwrite the content range to the user requested range
+        self.properties.content_range = "bytes {0}-{1}/{2}".format(
+            self._start_range,
+            self._end_range,
+            self._file_size
+        )
+
+        # Overwrite the content MD5 as it is the MD5 for the last range instead
+        # of the stored MD5
+        # TODO: Set to the stored MD5 when the service returns this
+        self.properties.content_md5 = None
+
+        if self.size == 0:
+            self._current_content = b""
+        else:
+            self._current_content = process_content(
+                self._response,
+                self._initial_offset[0],
+                self._initial_offset[1],
+                self._encryption_options
+            )
+
+    def __len__(self):
+        return self.size
+
+    def _initial_request(self):
+        range_header, range_validation = validate_and_format_range_headers(
+            self._initial_range[0],
+            self._initial_range[1],
+            start_range_required=False,
+            end_range_required=False,
+            check_content_md5=self._validate_content
+        )
+
+        try:
+            location_mode, response = self._client.download(
+                range=range_header,
+                range_get_content_md5=range_validation,
+                validate_content=self._validate_content,
+                data_stream_total=None,
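+                # The total size is not yet known for this initial request;
+                # it is parsed from the response's Content-Range header below.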
+ download_stream_current=0, + **self._request_options + ) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. + self._location_mode = location_mode + + # Parse the total file size and adjust the download size if ranges + # were specified + self._file_size = parse_length_from_content_range(response.properties.content_range) + if self._end_range is not None: + # Use the end range index unless it is over the end of the file + self.size = min(self._file_size, self._end_range - self._start_range + 1) + elif self._start_range is not None: + self.size = self._file_size - self._start_range + else: + self.size = self._file_size + + except HttpResponseError as error: + if self._start_range is None and error.response.status_code == 416: + # Get range will fail on an empty file. If the user did not + # request a range, do a regular get request in order to get + # any properties. + try: + _, response = self._client.download( + validate_content=self._validate_content, + data_stream_total=0, + download_stream_current=0, + **self._request_options + ) + except HttpResponseError as error: + process_storage_error(error) + + # Set the download size to empty + self.size = 0 + self._file_size = 0 + else: + process_storage_error(error) + + # If the file is small, the download is complete at this point. + # If file size is large, download the rest of the file in chunks. + if response.properties.size == self.size: + self._download_complete = True + self._etag = response.properties.etag + return response + + def chunks(self): + # type: () -> Iterator[bytes] + """Iterate over chunks in the download stream. + + :rtype: Iterator[bytes] + """ + if self.size == 0 or self._download_complete: + iter_downloader = None + else: + data_end = self._file_size + if self._end_range is not None: + # Use the end range index unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + iter_downloader = _ChunkDownloader( + client=self._client, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # start where the first download ended + end_range=data_end, + stream=None, + parallel=False, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + etag=self._etag, + **self._request_options + ) + return _ChunkIterator( + size=self.size, + content=self._current_content, + downloader=iter_downloader, + chunk_size=self._config.max_chunk_get_size) + + def readall(self): + """Download the contents of this file. + + This operation is blocking until all data is downloaded. + :rtype: bytes or str + """ + stream = BytesIO() + self.readinto(stream) + data = stream.getvalue() + if self._encoding: + return data.decode(self._encoding) + return data + + def content_as_bytes(self, max_concurrency=1): + """Download the contents of this file. + + This operation is blocking until all data is downloaded. + + :keyword int max_concurrency: + The number of parallel connections with which to download. + :rtype: bytes + """ + warnings.warn( + "content_as_bytes is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + return self.readall() + + def content_as_text(self, max_concurrency=1, encoding="UTF-8"): + """Download the contents of this file, and decode as text. + + This operation is blocking until all data is downloaded. 
+
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            Text encoding to decode the downloaded bytes. Default is UTF-8.
+        :rtype: str
+        """
+        warnings.warn(
+            "content_as_text is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        return self.readall()
+
+    def readinto(self, stream):
+        """Download the contents of this file to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        # The stream must be seekable if parallel download is required
+        parallel = self._max_concurrency > 1
+        if parallel:
+            error_message = "Target stream handle must be seekable."
+            if sys.version_info >= (3,) and not stream.seekable():
+                raise ValueError(error_message)
+
+            try:
+                stream.seek(stream.tell())
+            except (NotImplementedError, AttributeError):
+                raise ValueError(error_message)
+
+        # Write the content to the user stream
+        stream.write(self._current_content)
+        if self._download_complete:
+            return self.size
+
+        data_end = self._file_size
+        if self._end_range is not None:
+            # Use the length unless it is over the end of the file
+            data_end = min(self._file_size, self._end_range + 1)
+
+        downloader = _ChunkDownloader(
+            client=self._client,
+            total_size=self.size,
+            chunk_size=self._config.max_chunk_get_size,
+            current_progress=self._first_get_size,
+            start_range=self._initial_range[1] + 1,  # Start where the first download ended
+            end_range=data_end,
+            stream=stream,
+            parallel=parallel,
+            validate_content=self._validate_content,
+            encryption_options=self._encryption_options,
+            use_location=self._location_mode,
+            etag=self._etag,
+            **self._request_options
+        )
+        if parallel:
+            import concurrent.futures
+            with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor:
+                list(executor.map(
+                    with_current_context(downloader.process_chunk),
+                    downloader.get_chunk_offsets()
+                ))
+        else:
+            for chunk in downloader.get_chunk_offsets():
+                downloader.process_chunk(chunk)
+        return self.size
+
+    def download_to_stream(self, stream, max_concurrency=1):
+        """Download the contents of this file to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The properties of the downloaded file.
+        :rtype: Any
+        """
+        warnings.warn(
+            "download_to_stream is deprecated, use readinto instead",
+            DeprecationWarning
+        )
+        self._max_concurrency = max_concurrency
+        self.readinto(stream)
+        return self.properties
diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_file_client.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_file_client.py
new file mode 100644
index 0000000..5f8f979
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_file_client.py
@@ -0,0 +1,1411 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, too-many-public-methods +import functools +import time +from io import BytesIO +from typing import ( # pylint: disable=unused-import + Optional, Union, IO, List, Dict, Any, Iterable, Tuple, + TYPE_CHECKING +) + + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six +from azure.core.exceptions import HttpResponseError +from azure.core.paging import ItemPaged # pylint: disable=ungrouped-imports +from azure.core.tracing.decorator import distributed_trace + +from ._generated import AzureFileStorage +from ._generated.models import FileHTTPHeaders +from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks +from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, get_length +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._shared.parser import _str +from ._parser import _get_file_permission, _datetime_to_str +from ._lease import ShareLeaseClient +from ._serialize import get_source_conditions, get_access_conditions, get_smb_properties, get_api_version +from ._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result +from ._models import HandlesPaged, NTFSAttributes # pylint: disable=unused-import +from ._download import StorageStreamDownloader + +if TYPE_CHECKING: + from datetime import datetime + from ._models import ShareProperties, ContentSettings, FileProperties, Handle + from ._generated.models import HandleItem + + +def _upload_file_helper( + client, + stream, + size, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + file_settings, + file_attributes="none", + file_creation_time="now", + file_last_write_time="now", + file_permission=None, + file_permission_key=None, + **kwargs): + try: + if size is None or size < 0: + raise ValueError("A content size must be specified for a File.") + response = client.create_file( + size, + content_settings=content_settings, + metadata=metadata, + timeout=timeout, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + permission_key=file_permission_key, + **kwargs + ) + if size == 0: + return response + + responses = upload_data_chunks( + service=client, + uploader_class=FileChunkUploader, + total_size=size, + chunk_size=file_settings.max_range_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + timeout=timeout, + **kwargs + ) + return sorted(responses, key=lambda r: r.get('last_modified'))[-1] + except HttpResponseError as error: + process_storage_error(error) + + +class ShareFileClient(StorageAccountHostsMixin): + """A client to interact with a specific file, although that file may not yet exist. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the + file, use the :func:`from_file_url` classmethod. + :param share_name: + The name of the share for the file. + :type share_name: str + :param str file_path: + The file path to the file with which to interact. 
If specified, this value will override + a file value specified in the file URL. + :param str snapshot: + An optional file snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + file_path, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not (share_name and file_path): + raise ValueError("Please specify a share name and file name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + if hasattr(credential, 'get_token'): + raise ValueError("Token credentials not supported by the File service.") + + path_snapshot = None + path_snapshot, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + self.share_name = share_name + self.file_path = file_path.split('/') + self.file_name = self.file_path[-1] + self.directory_path = "/".join(self.file_path[:-1]) + + self._query_str, credential = self._format_query_string( + sas_token, credential, share_snapshot=self.snapshot) + super(ShareFileClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + + @classmethod + def from_file_url( + cls, file_url, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> ShareFileClient + """A client to interact with a specific file, although that file may not yet exist. + + :param str file_url: The full URI to the file. + :param str snapshot: + An optional file snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials or an account + shared access key. 
+ :returns: A File client. + :rtype: ~azure.storage.fileshare.ShareFileClient + """ + try: + if not file_url.lower().startswith('http'): + file_url = "https://" + file_url + except AttributeError: + raise ValueError("File URL must be a string.") + parsed_url = urlparse(file_url.rstrip('/')) + + if not (parsed_url.netloc and parsed_url.path): + raise ValueError("Invalid URL: {}".format(file_url)) + account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query + + path_share, _, path_file = parsed_url.path.lstrip('/').partition('/') + path_snapshot, _ = parse_query(parsed_url.query) + snapshot = snapshot or path_snapshot + share_name = unquote(path_share) + file_path = '/'.join([unquote(p) for p in path_file.split('/')]) + return cls(account_url, share_name, file_path, snapshot, credential, **kwargs) + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + share_name = self.share_name + if isinstance(share_name, six.text_type): + share_name = share_name.encode('UTF-8') + return "{}://{}/{}/{}{}".format( + self.scheme, + hostname, + quote(share_name), + "/".join([quote(p, safe='~') for p in self.file_path]), + self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + share_name, # type: str + file_path, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> ShareFileClient + """Create ShareFileClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param share_name: The name of the share. + :type share_name: str + :param str file_path: + The file path. + :param str snapshot: + An optional file snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials or an account + shared access key. + :returns: A File client. + :rtype: ~azure.storage.fileshare.ShareFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_hello_world.py + :start-after: [START create_file_client] + :end-before: [END create_file_client] + :language: python + :dedent: 12 + :caption: Creates the file client with connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs) + + @distributed_trace + def acquire_lease(self, lease_id=None, **kwargs): + # type: (Optional[str], **Any) -> ShareLeaseClient + """Requests a new lease. + + If the file does not have an active lease, the File + Service creates a lease on the blob and returns a new lease. + + :param str lease_id: + Proposed lease ID, in a GUID string format. The File Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A ShareLeaseClient object. + :rtype: ~azure.storage.fileshare.ShareLeaseClient + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_client.py + :start-after: [START acquire_and_release_lease_on_file] + :end-before: [END acquire_and_release_lease_on_file] + :language: python + :dedent: 12 + :caption: Acquiring a lease on a file. + """ + kwargs['lease_duration'] = -1 + lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore + lease.acquire(**kwargs) + return lease + + @distributed_trace + def create_file( # type: ignore + self, size, # type: int + file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="now", # type: Union[str, datetime] + file_last_write_time="now", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Creates a new file. + + Note that it only initializes the file with no content. + + :param int size: Specifies the maximum size for the file, + up to 1 TB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START create_file] + :end-before: [END create_file] + :language: python + :dedent: 12 + :caption: Create a file. 
+ """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + content_settings = kwargs.pop('content_settings', None) + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + file_http_headers = None + if content_settings: + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition + ) + file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') + try: + return self._client.file.create( # type: ignore + file_content_length=size, + metadata=metadata, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + file_http_headers=file_http_headers, + lease_access_conditions=access_conditions, + headers=headers, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def upload_file( + self, data, # type: Any + length=None, # type: Optional[int] + file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="now", # type: Union[str, datetime] + file_last_write_time="now", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Uploads a new file. + + :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. 
+ :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str encoding: + Defaults to UTF-8. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START upload_file] + :end-before: [END upload_file] + :language: python + :dedent: 12 + :caption: Upload a file. + """ + metadata = kwargs.pop('metadata', None) + content_settings = kwargs.pop('content_settings', None) + max_concurrency = kwargs.pop('max_concurrency', 1) + validate_content = kwargs.pop('validate_content', False) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Encryption not supported.") + + if isinstance(data, six.text_type): + data = data.encode(encoding) + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, 'read'): + stream = data + elif hasattr(data, '__iter__'): + stream = IterStreamer(data, encoding=encoding) # type: ignore + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + return _upload_file_helper( # type: ignore + self, + stream, + length, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + self._config, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + file_permission_key=permission_key, + **kwargs) + + @distributed_trace + def start_copy_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Any + """Initiates the copying of data from a source URL into the file + referenced by the client. + + The status of this copy operation can be found using the `get_properties` + method. + + :param str source_url: + Specifies the URL of the source file. + :keyword str file_permission: + If specified the permission (security descriptor) shall be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. This setting can be + used if Permission size is <= 8KB, otherwise permission_key shall be used. + If SDDL is specified as input, it must have owner, group and dacl. 
+ Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword str permission_key: + Key of the permission to be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword file_attributes: + This value can be set to "source" to copy file attributes from the source file to the target file, + or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes + to set on the target file. If this is not set, the default value is "Archive". + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :keyword file_creation_time: + This value can be set to "source" to copy the creation time from the source file to the target file, + or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. + If this is not set, creation time will be set to the date time value of the creation + (or when it was overwritten) of the target file by copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_creation_time: str or ~datetime.datetime + :keyword file_last_write_time: + This value can be set to "source" to copy the last write time from the source file to the target file, or + a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. + If this is not set, value will be the last write time to the file by the copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_last_write_time: str or ~datetime.datetime + :keyword bool ignore_read_only: + Specifies the option to overwrite the target file if it already exists and has read-only attribute set. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword bool set_archive_attribute: + Specifies the option to set the archive attribute on the target file. + True means the archive attribute will be set on the target file despite attribute + overrides or the source file state. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_client.py + :start-after: [START copy_file_from_url] + :end-before: [END copy_file_from_url] + :language: python + :dedent: 12 + :caption: Copy a file from a URL + """ + metadata = kwargs.pop('metadata', None) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + kwargs.update(get_smb_properties(kwargs)) + try: + return self._client.file.start_copy( + source_url, + metadata=metadata, + lease_access_conditions=access_conditions, + headers=headers, + cls=return_response_headers, + timeout=timeout, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, FileProperties], Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination file with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID, or an + instance of FileProperties. + :type copy_id: str or ~azure.storage.fileshare.FileProperties + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + copy_id = copy_id.copy.id + except AttributeError: + try: + copy_id = copy_id['copy_id'] + except TypeError: + pass + try: + self._client.file.abort_copy(copy_id=copy_id, + lease_access_conditions=access_conditions, + timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def download_file( + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs + ): + # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader + """Downloads a file to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the file into + a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. + + :param int offset: + Start of byte range to use for downloading a section of the file. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the file has an active lease. 
Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.fileshare.StorageStreamDownloader + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START download_file] + :end-before: [END download_file] + :language: python + :dedent: 12 + :caption: Download a file. + """ + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Encryption not supported.") + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + + range_end = None + if length is not None: + range_end = offset + length - 1 # Service actually uses an end-range inclusive index + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + + return StorageStreamDownloader( + client=self._client.file, + config=self._config, + start_range=offset, + end_range=range_end, + encryption_options=None, + name=self.file_name, + path='/'.join(self.file_path), + share=self.share_name, + lease_access_conditions=access_conditions, + cls=deserialize_file_stream, + **kwargs) + + @distributed_trace + def delete_file(self, **kwargs): + # type: (Any) -> None + """Marks the specified file for deletion. The file is + later deleted during garbage collection. + + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START delete_file] + :end-before: [END delete_file] + :language: python + :dedent: 12 + :caption: Delete a file. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_file_properties(self, **kwargs): + # type: (Any) -> FileProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the file. + + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :returns: FileProperties + :rtype: ~azure.storage.fileshare.FileProperties + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + file_props = self._client.file.get_properties( + sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, + timeout=timeout, + cls=deserialize_file_properties, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + file_props.name = self.file_name + file_props.share = self.share_name + file_props.snapshot = self.snapshot + file_props.path = '/'.join(self.file_path) + return file_props # type: ignore + + @distributed_trace + def set_http_headers(self, content_settings, # type: ContentSettings + file_attributes="preserve", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Union[str, datetime] + file_last_write_time="preserve", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the file. + + :param ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). 
+ :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + file_content_length = kwargs.pop('size', None) + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition + ) + file_permission = _get_file_permission(file_permission, permission_key, 'preserve') + try: + return self._client.file.set_http_headers( # type: ignore + file_content_length=file_content_length, + file_http_headers=file_http_headers, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + lease_access_conditions=access_conditions, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_file_metadata(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] + """Sets user-defined metadata for the specified file as one or more + name-value pairs. + + Each call to this operation replaces all existing metadata + attached to the file. To remove all metadata from the file, + call this operation with no metadata dict. + + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return self._client.file.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + metadata=metadata, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def upload_range( # type: ignore + self, data, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Upload a range of bytes to a file. + + :param bytes data: + The data to upload. + :param int offset: + Start of byte range to use for uploading a section of the file. + The range can be up to 4 MB in size. + :param int length: + Number of bytes to use for uploading a section of the file. + The range can be up to 4 MB in size. + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. 
+    @distributed_trace
+    def set_file_metadata(self, metadata=None, **kwargs):
+        # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any]
+        """Sets user-defined metadata for the specified file as one or more
+        name-value pairs.
+
+        Each call to this operation replaces all existing metadata
+        attached to the file. To remove all metadata from the file,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the file as metadata.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))  # type: ignore
+        try:
+            return self._client.file.set_metadata(  # type: ignore
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                metadata=metadata,
+                lease_access_conditions=access_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def upload_range(  # type: ignore
+            self, data,  # type: bytes
+            offset,  # type: int
+            length,  # type: int
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        """Upload a range of bytes to a file.
+
+        :param bytes data:
+            The data to upload.
+        :param int offset:
+            Start of byte range to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for uploading a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the range content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :keyword str encoding:
+            Encoding to use if text is supplied as input. Defaults to UTF-8.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        validate_content = kwargs.pop('validate_content', False)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError("Encryption not supported.")
+        if isinstance(data, six.text_type):
+            data = data.encode(encoding)
+
+        end_range = offset + length - 1  # Reformat to an inclusive range index
+        content_range = 'bytes={0}-{1}'.format(offset, end_range)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        try:
+            return self._client.file.upload_range(  # type: ignore
+                range=content_range,
+                content_length=length,
+                optionalbody=data,
+                timeout=timeout,
+                validate_content=validate_content,
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
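[Editorial aside, not part of the patch: a hedged sketch of upload_range against the hypothetical file_client. create_file is used here only to pre-size the file, since range writes require an existing file of sufficient length.]

    # Illustrative only: create a 1 KiB file, then overwrite its second half.
    file_client.create_file(size=1024)
    file_client.upload_range(b"x" * 1024, offset=0, length=1024)
    file_client.upload_range(b"y" * 512, offset=512, length=512)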
+    @staticmethod
+    def _upload_range_from_url_options(source_url,  # type: str
+                                       offset,  # type: int
+                                       length,  # type: int
+                                       source_offset,  # type: int
+                                       **kwargs  # type: Any
+                                       ):
+        # type: (...) -> Dict[str, Any]
+
+        if offset is None:
+            raise ValueError("offset must be provided.")
+        if length is None:
+            raise ValueError("length must be provided.")
+        if source_offset is None:
+            raise ValueError("source_offset must be provided.")
+
+        # Format range
+        end_range = offset + length - 1
+        destination_range = 'bytes={0}-{1}'.format(offset, end_range)
+        source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1)
+        source_authorization = kwargs.pop('source_authorization', None)
+        source_mod_conditions = get_source_conditions(kwargs)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+        options = {
+            'copy_source_authorization': source_authorization,
+            'copy_source': source_url,
+            'content_length': 0,
+            'source_range': source_range,
+            'range': destination_range,
+            'source_modified_access_conditions': source_mod_conditions,
+            'lease_access_conditions': access_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def upload_range_from_url(self, source_url,
+                              offset,
+                              length,
+                              source_offset,
+                              **kwargs
+                              ):
+        # type: (str, int, int, int, **Any) -> Dict[str, Any]
+        """
+        Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies an Azure file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.file.core.windows.net/myshare/mydir/myfile
+            https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
+        :param int offset:
+            Start of byte range to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for updating a section of the file.
+            The range can be up to 4 MB in size.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+            The service will read the same number of bytes as the destination range (``length`` bytes).
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source
+            blob has been modified since the specified date/time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source blob
+            has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        options = self._upload_range_from_url_options(
+            source_url=source_url,
+            offset=offset,
+            length=length,
+            source_offset=source_offset,
+            **kwargs
+        )
+        try:
+            return self._client.file.upload_range_from_url(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
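[Editorial aside, not part of the patch: a hedged sketch of upload_range_from_url. The source URL and SAS token are assumed placeholders; the source must be readable by the service.]

    # Illustrative only: server-side copy of the first 512 bytes of a source
    # file into the same range of the destination file.
    src = "https://otheraccount.file.core.windows.net/myshare/mydir/myfile?<sas-token>"
    file_client.upload_range_from_url(src, offset=0, length=512, source_offset=0)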
+    def _get_ranges_options(  # type: ignore
+            self, offset=None,  # type: Optional[int]
+            length=None,  # type: Optional[int]
+            previous_sharesnapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError("Unsupported method for encryption.")
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+
+        content_range = None
+        if offset is not None:
+            if length is not None:
+                end_range = offset + length - 1  # Reformat to an inclusive range index
+                content_range = 'bytes={0}-{1}'.format(offset, end_range)
+            else:
+                content_range = 'bytes={0}-'.format(offset)
+        options = {
+            'sharesnapshot': self.snapshot,
+            'lease_access_conditions': access_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'range': content_range}
+        if previous_sharesnapshot:
+            try:
+                options['prevsharesnapshot'] = previous_sharesnapshot.snapshot  # type: ignore
+            except AttributeError:
+                try:
+                    options['prevsharesnapshot'] = previous_sharesnapshot['snapshot']  # type: ignore
+                except TypeError:
+                    options['prevsharesnapshot'] = previous_sharesnapshot
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def get_ranges(  # type: ignore
+            self, offset=None,  # type: Optional[int]
+            length=None,  # type: Optional[int]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> List[Dict[str, int]]
+        """Returns the list of valid ranges of a file or snapshot
+        of a file.
+
+        :param int offset:
+            Specifies the start offset of bytes over which to get ranges.
+        :param int length:
+            Number of bytes over which to get ranges.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns:
+            A list of valid ranges.
+        :rtype: List[dict[str, int]]
+        """
+        options = self._get_ranges_options(
+            offset=offset,
+            length=length,
+            **kwargs)
+        try:
+            ranges = self._client.file.get_range_list(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges]
+
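[Editorial aside, not part of the patch: a hedged sketch of get_ranges on the hypothetical file_client.]

    # Illustrative only: print each valid range as inclusive byte offsets.
    for file_range in file_client.get_ranges():
        print(file_range['start'], file_range['end'])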
+    @distributed_trace
+    def get_ranges_diff(  # type: ignore
+            self,
+            previous_sharesnapshot,  # type: Union[str, Dict[str, Any]]
+            offset=None,  # type: Optional[int]
+            length=None,  # type: Optional[int]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+        """Returns the list of valid ranges of a file or snapshot
+        of a file.
+
+        .. versionadded:: 12.6.0
+
+        :param str previous_sharesnapshot:
+            The snapshot diff parameter that contains an opaque DateTime value that
+            specifies a previous file snapshot to be compared
+            against a more recent snapshot or the current file.
+        :param int offset:
+            Specifies the start offset of bytes over which to get ranges.
+        :param int length:
+            Number of bytes over which to get ranges.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns:
+            A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled file ranges, the second element is the cleared file ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        options = self._get_ranges_options(
+            offset=offset,
+            length=length,
+            previous_sharesnapshot=previous_sharesnapshot,
+            **kwargs)
+        try:
+            ranges = self._client.file.get_range_list(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_file_ranges_result(ranges)
+
+    @distributed_trace
+    def clear_range(  # type: ignore
+            self, offset,  # type: int
+            length,  # type: int
+            **kwargs
+        ):
+        # type: (...) -> Dict[str, Any]
+        """Clears the specified range and releases the space used in storage for
+        that range.
+
+        :param int offset:
+            Start of byte range to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError("Unsupported method for encryption.")
+
+        if offset is None or offset % 512 != 0:
+            raise ValueError("offset must be an integer that aligns to a 512-byte boundary.")
+        if length is None or length % 512 != 0:
+            raise ValueError("length must be an integer that aligns to a 512-byte boundary.")
+        end_range = length + offset - 1  # Reformat to an inclusive range index
+        content_range = 'bytes={0}-{1}'.format(offset, end_range)
+        try:
+            return self._client.file.upload_range(  # type: ignore
+                timeout=timeout,
+                cls=return_response_headers,
+                content_length=0,
+                optionalbody=None,
+                file_range_write="clear",
+                range=content_range,
+                lease_access_conditions=access_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def resize_file(self, size, **kwargs):
+        # type: (int, Any) -> Dict[str, Any]
+        """Resizes a file to the specified size.
+
+        :param int size:
+            Size to resize file to (in bytes).
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Any]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return self._client.file.set_http_headers(  # type: ignore
+                file_content_length=size,
+                file_attributes="preserve",
+                file_creation_time="preserve",
+                file_last_write_time="preserve",
+                file_permission="preserve",
+                lease_access_conditions=access_conditions,
+                cls=return_response_headers,
+                timeout=timeout,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
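[Editorial aside, not part of the patch: a hedged sketch of clear_range and resize_file on the hypothetical file_client. The offset and length are chosen to satisfy the 512-byte alignment check above.]

    # Illustrative only: release the second half of the 1 KiB file, then shrink it.
    file_client.clear_range(offset=512, length=512)
    file_client.resize_file(size=512)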
+    @distributed_trace
+    def list_handles(self, **kwargs):
+        # type: (Any) -> ItemPaged[Handle]
+        """Lists handles for the file.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: An auto-paging iterable of HandleItem
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.HandleItem]
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.file.list_handles,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=HandlesPaged)
+
+    @distributed_trace
+    def close_handle(self, handle, **kwargs):
+        # type: (Union[str, HandleItem], Any) -> Dict[str, int]
+        """Close an open file handle.
+
+        :param handle:
+            A specific handle to close.
+        :type handle: str or ~azure.storage.fileshare.HandleItem
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns:
+            The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles failed to close in a dict.
+        :rtype: dict[str, int]
+        """
+        try:
+            handle_id = handle.id  # type: ignore
+        except AttributeError:
+            handle_id = handle
+        if handle_id == '*':
+            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
+        try:
+            response = self._client.file.force_close_handles(
+                handle_id,
+                marker=None,
+                sharesnapshot=self.snapshot,
+                cls=return_response_headers,
+                **kwargs
+            )
+            return {
+                'closed_handles_count': response.get('number_of_handles_closed', 0),
+                'failed_handles_count': response.get('number_of_handles_failed', 0)
+            }
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def close_all_handles(self, **kwargs):
+        # type: (Any) -> Dict[str, int]
+        """Close any open file handles.
+
+        This operation will block until the service has closed all open handles.
+
+        :keyword int timeout:
+            The timeout parameter is expressed in seconds.
+        :returns: The number of handles closed (this may be 0 if no open handles were found)
+            and the number of handles failed to close in a dict.
+        :rtype: dict[str, int]
+        """
+        timeout = kwargs.pop('timeout', None)
+        start_time = time.time()
+
+        try_close = True
+        continuation_token = None
+        total_closed = 0
+        total_failed = 0
+        while try_close:
+            try:
+                response = self._client.file.force_close_handles(
+                    handle_id='*',
+                    timeout=timeout,
+                    marker=continuation_token,
+                    sharesnapshot=self.snapshot,
+                    cls=return_response_headers,
+                    **kwargs
+                )
+            except HttpResponseError as error:
+                process_storage_error(error)
+            continuation_token = response.get('marker')
+            try_close = bool(continuation_token)
+            total_closed += response.get('number_of_handles_closed', 0)
+            total_failed += response.get('number_of_handles_failed', 0)
+            if timeout:
+                timeout = max(0, timeout - (time.time() - start_time))
+        return {
+            'closed_handles_count': total_closed,
+            'failed_handles_count': total_failed
+        }
diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/__init__.py
new file mode 100644
index 0000000..34ce526
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/__init__.py
@@ -0,0 +1,16 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# -------------------------------------------------------------------------- + +from ._azure_file_storage import AzureFileStorage +__all__ = ['AzureFileStorage'] + +try: + from ._patch import patch_sdk # type: ignore + patch_sdk() +except ImportError: + pass diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_azure_file_storage.py new file mode 100644 index 0000000..6275ae4 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_azure_file_storage.py @@ -0,0 +1,96 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.core import PipelineClient +from msrest import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + + from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from ._configuration import AzureFileStorageConfiguration +from .operations import ServiceOperations +from .operations import ShareOperations +from .operations import DirectoryOperations +from .operations import FileOperations +from . import models + + +class AzureFileStorage(object): + """AzureFileStorage. + + :ivar service: ServiceOperations operations + :vartype service: azure.storage.fileshare.operations.ServiceOperations + :ivar share: ShareOperations operations + :vartype share: azure.storage.fileshare.operations.ShareOperations + :ivar directory: DirectoryOperations operations + :vartype directory: azure.storage.fileshare.operations.DirectoryOperations + :ivar file: FileOperations operations + :vartype file: azure.storage.fileshare.operations.FileOperations + :param url: The URL of the service account, share, directory or file that is the target of the desired operation. + :type url: str + """ + + def __init__( + self, + url, # type: str + **kwargs # type: Any + ): + # type: (...) -> None + base_url = '{url}' + self._config = AzureFileStorageConfiguration(url, **kwargs) + self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._serialize.client_side_validation = False + self._deserialize = Deserializer(client_models) + + self.service = ServiceOperations( + self._client, self._config, self._serialize, self._deserialize) + self.share = ShareOperations( + self._client, self._config, self._serialize, self._deserialize) + self.directory = DirectoryOperations( + self._client, self._config, self._serialize, self._deserialize) + self.file = FileOperations( + self._client, self._config, self._serialize, self._deserialize) + + def _send_request(self, http_request, **kwargs): + # type: (HttpRequest, Any) -> HttpResponse + """Runs the network request through the client's chained policies. + + :param http_request: The network request you want to make. Required. + :type http_request: ~azure.core.pipeline.transport.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. 
Defaults to True. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.pipeline.transport.HttpResponse + """ + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + http_request.url = self._client.format_url(http_request.url, **path_format_arguments) + stream = kwargs.pop("stream", True) + pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) + return pipeline_response.http_response + + def close(self): + # type: () -> None + self._client.close() + + def __enter__(self): + # type: () -> AzureFileStorage + self._client.__enter__() + return self + + def __exit__(self, *exc_details): + # type: (Any) -> None + self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_configuration.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_configuration.py new file mode 100644 index 0000000..7d76099 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/_configuration.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + +VERSION = "unknown" + +class AzureFileStorageConfiguration(Configuration): + """Configuration for AzureFileStorage. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param url: The URL of the service account, share, directory or file that is the target of the desired operation. + :type url: str + """ + + def __init__( + self, + url, # type: str + **kwargs # type: Any + ): + # type: (...) -> None + if url is None: + raise ValueError("Parameter 'url' must not be None.") + super(AzureFileStorageConfiguration, self).__init__(**kwargs) + + self.url = url + self.version = "2020-10-02" + self.file_range_write_from_url = "update" + kwargs.setdefault('sdk_moniker', 'azurefilestorage/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs # type: Any + ): + # type: (...) 
-> None + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/__init__.py new file mode 100644 index 0000000..f306ba0 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._azure_file_storage import AzureFileStorage +__all__ = ['AzureFileStorage'] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_azure_file_storage.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_azure_file_storage.py new file mode 100644 index 0000000..7453a46 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_azure_file_storage.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core import AsyncPipelineClient +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from msrest import Deserializer, Serializer + +from ._configuration import AzureFileStorageConfiguration +from .operations import ServiceOperations +from .operations import ShareOperations +from .operations import DirectoryOperations +from .operations import FileOperations +from .. import models + + +class AzureFileStorage(object): + """AzureFileStorage. 
+ + :ivar service: ServiceOperations operations + :vartype service: azure.storage.fileshare.aio.operations.ServiceOperations + :ivar share: ShareOperations operations + :vartype share: azure.storage.fileshare.aio.operations.ShareOperations + :ivar directory: DirectoryOperations operations + :vartype directory: azure.storage.fileshare.aio.operations.DirectoryOperations + :ivar file: FileOperations operations + :vartype file: azure.storage.fileshare.aio.operations.FileOperations + :param url: The URL of the service account, share, directory or file that is the target of the desired operation. + :type url: str + """ + + def __init__( + self, + url: str, + **kwargs: Any + ) -> None: + base_url = '{url}' + self._config = AzureFileStorageConfiguration(url, **kwargs) + self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._serialize.client_side_validation = False + self._deserialize = Deserializer(client_models) + + self.service = ServiceOperations( + self._client, self._config, self._serialize, self._deserialize) + self.share = ShareOperations( + self._client, self._config, self._serialize, self._deserialize) + self.directory = DirectoryOperations( + self._client, self._config, self._serialize, self._deserialize) + self.file = FileOperations( + self._client, self._config, self._serialize, self._deserialize) + + async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: + """Runs the network request through the client's chained policies. + + :param http_request: The network request you want to make. Required. + :type http_request: ~azure.core.pipeline.transport.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to True. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse + """ + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + http_request.url = self._client.format_url(http_request.url, **path_format_arguments) + stream = kwargs.pop("stream", True) + pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) + return pipeline_response.http_response + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "AzureFileStorage": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details) -> None: + await self._client.__aexit__(*exc_details) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_configuration.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_configuration.py new file mode 100644 index 0000000..4ec5174 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/_configuration.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +VERSION = "unknown" + +class AzureFileStorageConfiguration(Configuration): + """Configuration for AzureFileStorage. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param url: The URL of the service account, share, directory or file that is the target of the desired operation. + :type url: str + """ + + def __init__( + self, + url: str, + **kwargs: Any + ) -> None: + if url is None: + raise ValueError("Parameter 'url' must not be None.") + super(AzureFileStorageConfiguration, self).__init__(**kwargs) + + self.url = url + self.version = "2020-10-02" + self.file_range_write_from_url = "update" + kwargs.setdefault('sdk_moniker', 'azurefilestorage/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ) -> None: + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/__init__.py new file mode 100644 index 0000000..ba8fb22 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/__init__.py @@ -0,0 +1,19 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._share_operations import ShareOperations +from ._directory_operations import DirectoryOperations +from ._file_operations import FileOperations + +__all__ = [ + 'ServiceOperations', + 'ShareOperations', + 'DirectoryOperations', + 'FileOperations', +] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_directory_operations.py new file mode 100644 index 0000000..2d3fa63 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_directory_operations.py @@ -0,0 +1,750 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class DirectoryOperations: + """DirectoryOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.fileshare.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + file_permission: Optional[str] = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + **kwargs: Any + ) -> None: + """Creates a new directory under the specified share or parent directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. 
+ :type file_last_write_time: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + 
response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + async def get_properties( + self, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + **kwargs: Any + ) -> None: + """Returns all system properties for the specified directory, and can also be used to check the + existence of a directory. The data returned does not include the files in the directory or any + subdirectories. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', 
response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + async def delete( + self, + timeout: Optional[int] = None, + **kwargs: Any + ) -> None: + """Removes the specified empty directory. Note that the directory must be empty before it can be + deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + async def set_properties( + self, + timeout: Optional[int] = None, + file_permission: Optional[str] = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + **kwargs: Any + ) -> None: + """Sets properties on the directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. 
+ :type file_last_write_time: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.set_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + 
response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + async def set_metadata( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + **kwargs: Any + ) -> None: + """Updates user defined metadata for the specified directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + 
if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + async def list_files_and_directories_segment( + self, + prefix: Optional[str] = None, + sharesnapshot: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + timeout: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListFilesIncludeType"]]] = None, + include_extended_info: Optional[bool] = None, + **kwargs: Any + ) -> "_models.ListFilesAndDirectoriesSegmentResponse": + """Returns a list of files or directories under the specified share or directory. It lists the + contents only for a single level of the directory hierarchy. + + :param prefix: Filters the results to return only entries whose name begins with the specified + prefix. + :type prefix: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param include: Include this parameter to specify one or more datasets to include in the + response. 
+ :type include: list[str or ~azure.storage.fileshare.models.ListFilesIncludeType] + :param include_extended_info: + :type include_extended_info: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListFilesAndDirectoriesSegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListFilesAndDirectoriesSegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_files_and_directories_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if include_extended_info is not None: + header_parameters['x-ms-file-extended-info'] = self._serialize.header("include_extended_info", include_extended_info, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + 
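+    # Usage sketch (editor's note; `dir_ops` is a hypothetical instance of
+    # this operation group, normally created for you by the generated client):
+    # the service pages results through the opaque `marker`, so callers loop
+    # until `next_marker` comes back empty:
+    #
+    #     marker = None
+    #     while True:
+    #         page = await dir_ops.list_files_and_directories_segment(
+    #             maxresults=1000, marker=marker)
+    #         ...  # consume the entries in page.segment
+    #         marker = page.next_marker
+    #         if not marker:
+    #             break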
list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + async def list_handles( + self, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> "_models.ListHandlesResponse": + """Lists handles for directory. + + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param recursive: Specifies operation should apply to the directory specified in the URI, its + files, its subdirectories and their files. + :type recursive: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListHandlesResponse, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListHandlesResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "listhandles" + accept = "application/xml" + + # Construct URL + url = self.list_handles.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if recursive is not None: + header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + 
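+        # Error handling follows the same shape as every operation in this
+        # file: status codes present in `error_map` (401/404/409) raise typed
+        # azure.core exceptions via `map_error`; anything else is wrapped in
+        # an HttpResponseError carrying the StorageError model that
+        # `failsafe_deserialize` parses from the XML body.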
if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListHandlesResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + async def force_close_handles( + self, + handle_id: str, + timeout: Optional[int] = None, + marker: Optional[str] = None, + sharesnapshot: Optional[str] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> None: + """Closes all handles open for given directory. + + :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk + (‘*’) is a wildcard that specifies all handles. + :type handle_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. + :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param recursive: Specifies operation should apply to the directory specified in the URI, its + files, its subdirectories and their files. 
+ :type recursive: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "forceclosehandles" + accept = "application/xml" + + # Construct URL + url = self.force_close_handles.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') + if recursive is not None: + header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) + response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) + response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) + + if cls: + return cls(pipeline_response, None, response_headers) + + force_close_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_file_operations.py new file mode 100644 index 0000000..eb8bddb --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_file_operations.py @@ -0,0 +1,1776 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class FileOperations: + """FileOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.fileshare.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + file_content_length: int, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + file_permission: Optional[str] = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_http_headers: Optional["_models.FileHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Creates a new file or replaces a file. Note it only initializes the file with no content. + + :param file_content_length: Specifies the maximum size for the file, up to 4 TB. + :type file_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. 
Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + :type file_last_write_time: str + :param file_http_headers: Parameter group. + :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _file_content_type = None + _file_content_encoding = None + _file_content_language = None + _file_cache_control = None + _file_content_md5 = None + _file_content_disposition = None + _lease_id = None + if file_http_headers is not None: + _file_content_type = file_http_headers.file_content_type + _file_content_encoding = file_http_headers.file_content_encoding + _file_content_language = file_http_headers.file_content_language + _file_cache_control = file_http_headers.file_cache_control + _file_content_md5 = file_http_headers.file_content_md5 + _file_content_disposition = file_http_headers.file_content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + file_type_constant = "file" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + header_parameters['x-ms-type'] = self._serialize.header("file_type_constant", file_type_constant, 'str') + if _file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') + if _file_content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') + if _file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') + if _file_cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') + if _file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') + if _file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = 
self._serialize.header("metadata", metadata, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def download( + self, + timeout: Optional[int] = None, + range: Optional[str] = None, + range_get_content_md5: Optional[bool] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> IO: + """Reads or downloads a file from the system, including its metadata and properties. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. 
+ :type timeout: int + :param range: Return file data only from the specified byte range. + :type range: str + :param range_get_content_md5: When this header is set to true and specified together with the + Range header, the service returns the MD5 hash for the range, as long as the range is less than + or equal to 4 MB in size. + :type range_get_content_md5: bool + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + accept = "application/xml" + + # Construct URL + url = self.download.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + 
response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + 
response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def get_properties( + self, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Returns all user-defined metadata, standard HTTP properties, and system properties for the + file. It does not return the content of the file. 
+ + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-type']=self._deserialize('str', response.headers.get('x-ms-type')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + 
response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def delete( + self, + timeout: Optional[int] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Removes the file from the storage account. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def set_http_headers( + self, + timeout: Optional[int] = None, + file_content_length: Optional[int] = None, + file_permission: Optional[str] = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_http_headers: Optional["_models.FileHTTPHeaders"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Sets HTTP headers on the file. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param file_content_length: Resizes a file to the specified size. If the specified byte value + is less than the current size of the file, then all ranges above the specified byte value are + cleared. + :type file_content_length: long + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. 
This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + :type file_last_write_time: str + :param file_http_headers: Parameter group. + :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _file_content_type = None + _file_content_encoding = None + _file_content_language = None + _file_cache_control = None + _file_content_md5 = None + _file_content_disposition = None + _lease_id = None + if file_http_headers is not None: + _file_content_type = file_http_headers.file_content_type + _file_content_encoding = file_http_headers.file_content_encoding + _file_content_language = file_http_headers.file_content_language + _file_cache_control = file_http_headers.file_cache_control + _file_content_md5 = file_http_headers.file_content_md5 + _file_content_disposition = file_http_headers.file_content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.set_http_headers.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_content_length is not None: + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + if _file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') + if _file_content_encoding is not None: + 
header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') + if _file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') + if _file_cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') + if _file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') + if _file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', 
response.headers.get('x-ms-file-parent-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def set_metadata( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Updates user-defined metadata for the specified file. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, 
response_headers) + + set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def acquire_lease( + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "acquire" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def release_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "release" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) 
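+        # Descriptive note (added commentary, not generated code): by this point the
+        # pipeline has already applied the client's policies (retry, authentication,
+        # logging); below we only inspect the raw HTTP response. A successful
+        # Release Lease returns 200 OK, after which the lease is immediately
+        # available for another client to acquire.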
+ response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def change_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "change" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def break_lease( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. 
+ + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "lease" + action = "break" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + 
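+    # Hedged usage sketch (commentary only; ShareFileClient / ShareLeaseClient and
+    # their methods belong to the hand-written layer and are assumptions here, not
+    # defined in this generated module). The lease operations above are normally
+    # reached through the public async client rather than called directly:
+    #
+    #     file_client = ShareFileClient.from_connection_string(conn_str, "share", "dir/file.txt")
+    #     lease = await file_client.acquire_lease()   # file leases are infinite (-1)
+    #     try:
+    #         await file_client.upload_file(b"data", lease=lease)
+    #     finally:
+    #         await lease.release()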
+ break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def upload_range( + self, + range: str, + content_length: int, + timeout: Optional[int] = None, + file_range_write: Union[str, "_models.FileRangeWriteType"] = "update", + content_md5: Optional[bytearray] = None, + optionalbody: Optional[IO] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Upload a range of bytes to a file. + + :param range: Specifies the range of bytes to be written. Both the start and end of the range + must be specified. For an update operation, the range can be up to 4 MB in size. For a clear + operation, the range can be up to the value of the file's full size. The File service accepts + only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be + specified in the following format: bytes=startByte-endByte. + :type range: str + :param content_length: Specifies the number of bytes being transmitted in the request body. + When the x-ms-write header is set to clear, the value of this header must be set to zero. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param file_range_write: Specify one of the following options: - Update: Writes the bytes + specified by the request body into the specified range. The Range and Content-Length headers + must match to perform the update. - Clear: Clears the specified range and releases the space + used in storage for that range. To clear a range, set the Content-Length header to zero, and + set the Range header to a value that indicates the range to clear, up to maximum file size. + :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType + :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of + the data during transport. When the Content-MD5 header is specified, the File service compares + the hash of the content that has arrived with the header value that was sent. If the two hashes + do not match, the operation will fail with error code 400 (Bad Request). + :type content_md5: bytearray + :param optionalbody: Initial data. + :type optionalbody: IO + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "range" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.upload_range.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = optionalbody + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, 
None, response_headers) + + upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def upload_range_from_url( + self, + range: str, + copy_source: str, + content_length: int, + timeout: Optional[int] = None, + source_range: Optional[str] = None, + source_content_crc64: Optional[bytearray] = None, + copy_source_authorization: Optional[str] = None, + source_modified_access_conditions: Optional["_models.SourceModifiedAccessConditions"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Upload a range of bytes to a file where the contents are read from a URL. + + :param range: Writes data to the specified byte range in the file. + :type range: str + :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy + a file to another file within the same storage account, you may use Shared Key to authenticate + the source file. If you are copying a file from another storage account, or if you are copying + a blob from the same storage account or another storage account, then you must authenticate the + source file or blob using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a share snapshot can also + be specified as a copy source. + :type copy_source: str + :param content_length: Specifies the number of bytes being transmitted in the request body. + When the x-ms-write header is set to clear, the value of this header must be set to zero. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. + :type source_content_crc64: bytearray + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. + :type copy_source_authorization: str + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.fileshare.models.SourceModifiedAccessConditions + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _source_if_match_crc64 = None + _source_if_none_match_crc64 = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if source_modified_access_conditions is not None: + _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 + _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 + comp = "range" + accept = "application/xml" + + # Construct URL + url = self.upload_range_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if source_content_crc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') + if _source_if_match_crc64 is not None: + header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", _source_if_match_crc64, 'bytearray') + if _source_if_none_match_crc64 is not None: + header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", _source_if_none_match_crc64, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if copy_source_authorization is not None: + header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def get_range_list( + self, + sharesnapshot: Optional[str] = None, + prevsharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> "_models.ShareFileRangeList": + """Returns the list of valid ranges for a file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that, + when present, specifies the previous snapshot. + :type prevsharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param range: Specifies the range of bytes over which to list ranges, inclusively. + :type range: str + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ShareFileRangeList, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareFileRangeList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareFileRangeList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "rangelist" + accept = "application/xml" + + # Construct URL + url = self.get_range_list.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if prevsharesnapshot is not None: + query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-content-length']=self._deserialize('long', response.headers.get('x-ms-content-length')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ShareFileRangeList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def start_copy( + self, + copy_source: str, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + file_permission: 
Optional[str] = "inherit", + file_permission_key: Optional[str] = None, + copy_file_smb_info: Optional["_models.CopyFileSmbInfo"] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Copies a blob or file to a destination file within the storage account. + + :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy + a file to another file within the same storage account, you may use Shared Key to authenticate + the source file. If you are copying a file from another storage account, or if you are copying + a blob from the same storage account or another storage account, then you must authenticate the + source file or blob using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a share snapshot can also + be specified as a copy source. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param copy_file_smb_info: Parameter group. + :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _file_permission_copy_mode = None + _ignore_read_only = None + _file_attributes = None + _file_creation_time = None + _file_last_write_time = None + _set_archive_attribute = None + _lease_id = None + if copy_file_smb_info is not None: + _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode + _ignore_read_only = copy_file_smb_info.ignore_read_only + _file_attributes = copy_file_smb_info.file_attributes + _file_creation_time = copy_file_smb_info.file_creation_time + _file_last_write_time = copy_file_smb_info.file_last_write_time + _set_archive_attribute = copy_file_smb_info.set_archive_attribute + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + accept = "application/xml" + + # Construct URL + url = self.start_copy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + if _file_permission_copy_mode is not None: + header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", _file_permission_copy_mode, 'str') + if _ignore_read_only is not None: + header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", _ignore_read_only, 'bool') + if _file_attributes is not None: + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') + if _file_creation_time is not None: + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') + if _file_last_write_time is not None: + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') + if _set_archive_attribute is not None: + header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", _set_archive_attribute, 'bool') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + 
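+        # Commentary (added): each optional header above is the flattened form of
+        # the CopyFileSmbInfo / LeaseAccessConditions parameter groups; only values
+        # that are not None are serialized, so omitted SMB copy settings fall back
+        # to the service-side defaults.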
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def abort_copy( + self, + copy_id: str, + timeout: Optional[int] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Aborts a pending Copy File operation, and leaves a destination file with zero length and full + metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy + File operation. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "copy" + copy_action_abort_constant = "abort" + accept = "application/xml" + + # Construct URL + url = self.abort_copy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def list_handles( + self, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + **kwargs: Any + ) -> "_models.ListHandlesResponse": + """Lists handles for file. + + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. 
If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListHandlesResponse, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListHandlesResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "listhandles" + accept = "application/xml" + + # Construct URL + url = self.list_handles.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListHandlesResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + async def force_close_handles( + self, + handle_id: str, + timeout: Optional[int] = None, + marker: Optional[str] = None, + sharesnapshot: Optional[str] = None, + **kwargs: Any + ) -> None: + 
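+        # Commentary (added): handle_id may be the wildcard '*' to close every open
+        # handle on the file; callers are expected to repeat the call with the
+        # returned x-ms-marker header until it comes back empty (see the docstring
+        # below).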
"""Closes all handles open for given file. + + :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk + (‘*’) is a wildcard that specifies all handles. + :type handle_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. + :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "forceclosehandles" + accept = "application/xml" + + # Construct URL + url = self.force_close_handles.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) + response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) + 
response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) + + if cls: + return cls(pipeline_response, None, response_headers) + + force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_service_operations.py new file mode 100644 index 0000000..f413f29 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_service_operations.py @@ -0,0 +1,269 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class ServiceOperations: + """ServiceOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.fileshare.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def set_properties( + self, + storage_service_properties: "_models.StorageServiceProperties", + timeout: Optional[int] = None, + **kwargs: Any + ) -> None: + """Sets properties for a storage account's File service endpoint, including properties for Storage + Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. + :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. 
+ :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "properties" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {'url': '/'} # type: ignore + + async def get_properties( + self, + timeout: Optional[int] = None, + **kwargs: Any + ) -> "_models.StorageServiceProperties": + """Gets the properties of a storage account's File service, including properties for Storage + Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. 
+ :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceProperties, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.StorageServiceProperties + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('StorageServiceProperties', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_properties.metadata = {'url': '/'} # type: ignore + + async def list_shares_segment( + self, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListSharesIncludeType"]]] = None, + timeout: Optional[int] = None, + **kwargs: Any + ) -> "_models.ListSharesResponse": + """The List Shares Segment operation returns a list of the shares and share snapshots under the + specified account. + + :param prefix: Filters the results to return only entries whose name begins with the specified + prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. 
If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. + :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListSharesResponse, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListSharesResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListSharesResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_shares_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('ListSharesResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_shares_segment.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_share_operations.py new file mode 100644 index 0000000..ca08ada --- /dev/null +++ 
b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/aio/operations/_share_operations.py @@ -0,0 +1,1485 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class ShareOperations: + """ShareOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.fileshare.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + quota: Optional[int] = None, + access_tier: Optional[Union[str, "_models.ShareAccessTier"]] = None, + enabled_protocols: Optional[str] = None, + root_squash: Optional[Union[str, "_models.ShareRootSquash"]] = None, + **kwargs: Any + ) -> None: + """Creates a new share under the specified account. If the share with the same name already + exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :param quota: Specifies the maximum size of the share, in gigabytes. + :type quota: int + :param access_tier: Specifies the access tier of the share. + :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + :param enabled_protocols: Protocols to enable on the share. + :type enabled_protocols: str + :param root_squash: Root squash to set on the share. Only valid for NFS shares. 
+ :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if quota is not None: + header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) + if access_tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if enabled_protocols is not None: + header_parameters['x-ms-enabled-protocols'] = self._serialize.header("enabled_protocols", enabled_protocols, 'str') + if root_squash is not None: + header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{shareName}'} # type: ignore + + async def get_properties( + self, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Returns all user-defined metadata and system properties for the specified share or share + snapshot. The data returned does not include the share's list of files. 
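`create` above maps each optional keyword straight onto an `x-ms-*` request header and treats anything other than 201 as an error (a 409 surfaces as `ResourceExistsError` through the `error_map`). A sketch of creating an NFS share, assuming `share_ops` is an already-constructed `ShareOperations` instance and using literal enum values in place of the `_models` enums:

    async def create_nfs_share(share_ops):
        await share_ops.create(
            quota=100,                    # x-ms-share-quota, in GiB
            access_tier="Hot",            # x-ms-access-tier
            enabled_protocols="NFS",      # x-ms-enabled-protocols
            root_squash="NoRootSquash",   # only meaningful for NFS shares
            timeout=30)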
+ + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-share-quota']=self._deserialize('int', response.headers.get('x-ms-share-quota')) + response_headers['x-ms-share-provisioned-iops']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')) + response_headers['x-ms-share-provisioned-ingress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')) 
+ response_headers['x-ms-share-provisioned-egress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')) + response_headers['x-ms-share-next-allowed-quota-downgrade-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) + response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) + response_headers['x-ms-access-tier-transition-state']=self._deserialize('str', response.headers.get('x-ms-access-tier-transition-state')) + response_headers['x-ms-enabled-protocols']=self._deserialize('str', response.headers.get('x-ms-enabled-protocols')) + response_headers['x-ms-root-squash']=self._deserialize('str', response.headers.get('x-ms-root-squash')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{shareName}'} # type: ignore + + async def delete( + self, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Operation marks the specified share or share snapshot for deletion. The share or share snapshot + and any files contained within it are later deleted during garbage collection. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param delete_snapshots: Specifies the option include to delete the base share and all of its + snapshots. + :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType + :param lease_access_conditions: Parameter group. 
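Because `get_properties` returns `None` and surfaces everything through parsed response headers, callers who want those values use the `cls` hook, which, as the code above shows, is invoked as `cls(pipeline_response, None, response_headers)`. A sketch:

    async def read_share_quota(share_ops) -> int:
        # Returning the third argument makes the parsed header dict
        # the result of the call.
        headers = await share_ops.get_properties(
            cls=lambda resp, body, hdrs: hdrs)
        return headers['x-ms-share-quota']   # already deserialized to int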
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{shareName}'} # type: ignore + + async def acquire_lease( + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. 
A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "acquire" + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', 
response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{shareName}'} # type: ignore + + async def release_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "release" + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {'url': '/{shareName}'} # type: ignore + + async def change_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
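`acquire_lease` and `release_lease` pair up in the usual guard pattern: take the lease, run the protected operation with the returned `x-ms-lease-id`, release in a `finally`. A sketch; the import path follows this patch's package layout, and the `cls` hook is again used to recover headers:

    import uuid

    from azure.multiapi.storagev2.fileshare.v2020_10_02._generated import models as _models

    async def delete_share_under_lease(share_ops):
        # -1 requests an infinite lease; a GUID is proposed client-side.
        hdrs = await share_ops.acquire_lease(
            duration=-1,
            proposed_lease_id=str(uuid.uuid4()),
            cls=lambda resp, body, h: h)
        lease_id = hdrs['x-ms-lease-id']
        try:
            # The lease travels in the LeaseAccessConditions parameter group.
            await share_ops.delete(
                lease_access_conditions=_models.LeaseAccessConditions(
                    lease_id=lease_id))
        finally:
            await share_ops.release_lease(lease_id)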
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "change" + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {'url': '/{shareName}'} # type: ignore + + async def renew_lease( + self, + lease_id: str, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: 
Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "renew" + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.renew_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {'url': '/{shareName}'} # type: ignore + + async def break_lease( + self, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + request_id_parameter: Optional[str] = None, + sharesnapshot: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease + breaks immediately. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param lease_access_conditions: Parameter group. 
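`change_lease` swaps the lease id without ever releasing the lock, which is why it needs both the current and the proposed id; the active id comes back in `x-ms-lease-id`. A sketch:

    import uuid

    async def rotate_lease_id(share_ops, current_id: str) -> str:
        # 200 OK with the new id on success.
        hdrs = await share_ops.change_lease(
            lease_id=current_id,
            proposed_lease_id=str(uuid.uuid4()),
            cls=lambda resp, body, h: h)
        return hdrs['x-ms-lease-id']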
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "lease" + action = "break" + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return 
cls(pipeline_response, None, response_headers) + + break_lease.metadata = {'url': '/{shareName}'} # type: ignore + + async def create_snapshot( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + **kwargs: Any + ) -> None: + """Creates a read-only snapshot of a share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "share" + comp = "snapshot" + accept = "application/xml" + + # Construct URL + url = self.create_snapshot.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create_snapshot.metadata = {'url': '/{shareName}'} # type: ignore + + async def create_permission( + self, + share_permission: "_models.SharePermission", + timeout: Optional[int] = None, + **kwargs: Any + ) -> None: + """Create a permission (a security descriptor). + + :param share_permission: A permission (a security descriptor) at the share level. 
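For `create_snapshot` the only interesting output is the `x-ms-snapshot` header, the opaque DateTime token that other operations here accept back as their `sharesnapshot` parameter. A sketch:

    async def snapshot_share(share_ops) -> str:
        # 201 Created; the snapshot token arrives as a response header.
        hdrs = await share_ops.create_snapshot(cls=lambda r, b, h: h)
        return hdrs['x-ms-snapshot']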
+ :type share_permission: ~azure.storage.fileshare.models.SharePermission + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "share" + comp = "filepermission" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/xml" + + # Construct URL + url = self.create_permission.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(share_permission, 'SharePermission') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create_permission.metadata = {'url': '/{shareName}'} # type: ignore + + async def get_permission( + self, + file_permission_key: str, + timeout: Optional[int] = None, + **kwargs: Any + ) -> "_models.SharePermission": + """Returns the permission (security descriptor) for a given key. + + :param file_permission_key: Key of the permission to be set for the directory/file. + :type file_permission_key: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. 
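Unlike the XML operations around it, `create_permission` sends a JSON `SharePermission` body (an SDDL security descriptor) and hands back a key in `x-ms-file-permission-key` that files and directories can later reference. A sketch; the SDDL string is a placeholder:

    from azure.multiapi.storagev2.fileshare.v2020_10_02._generated import models as _models

    async def register_permission(share_ops) -> str:
        perm = _models.SharePermission(permission="O:SYG:SYD:(A;;FA;;;SY)")
        hdrs = await share_ops.create_permission(perm, cls=lambda r, b, h: h)
        return hdrs['x-ms-file-permission-key']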
+ :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SharePermission, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.SharePermission + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.SharePermission"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "share" + comp = "filepermission" + accept = "application/json" + + # Construct URL + url = self.get_permission.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('SharePermission', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_permission.metadata = {'url': '/{shareName}'} # type: ignore + + async def set_properties( + self, + timeout: Optional[int] = None, + quota: Optional[int] = None, + access_tier: Optional[Union[str, "_models.ShareAccessTier"]] = None, + root_squash: Optional[Union[str, "_models.ShareRootSquash"]] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Sets properties for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param quota: Specifies the maximum size of the share, in gigabytes. + :type quota: int + :param access_tier: Specifies the access tier of the share. + :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + :param root_squash: Root squash to set on the share. Only valid for NFS shares. 
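`get_permission` is the inverse lookup: the key goes out in the `x-ms-file-permission-key` request header and the deserialized `SharePermission` comes back as the return value. A sketch, assuming the model carries its SDDL text on a `permission` attribute:

    async def resolve_permission(share_ops, key: str) -> str:
        perm = await share_ops.get_permission(file_permission_key=key)
        return perm.permission   # the SDDL security descriptor string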
+ :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.set_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if quota is not None: + header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) + if access_tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if root_squash is not None: + header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {'url': '/{shareName}'} # type: ignore + + async def set_metadata( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Sets one or more user-defined 
name-value pairs for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{shareName}'} # type: ignore + + async def get_access_policy( + self, + timeout: Optional[int] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> List["_models.SignedIdentifier"]: + """Returns information about stored access policies specified on the 
share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of SignedIdentifier, or the result of cls(response) + :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + comp = "acl" + accept = "application/xml" + + # Construct URL + url = self.get_access_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_access_policy.metadata = {'url': '/{shareName}'} # type: ignore + + async def set_access_policy( + self, + timeout: Optional[int] = None, + share_acl: Optional[List["_models.SignedIdentifier"]] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> None: + """Sets a stored access policy for use with shared access signatures. 
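+
+ For example (an illustrative sketch, not part of the generated contract; ``ops``
+ stands for a configured async share operations client), a single read-only policy
+ can be set with
+ ``await ops.set_access_policy(share_acl=[_models.SignedIdentifier(id='read-policy-1', access_policy=_models.AccessPolicy(permission='r'))])``.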
+ + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param share_acl: The ACL for the share. + :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier] + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + comp = "acl" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_access_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True}} + if share_acl is not None: + body_content = self._serialize.body(share_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + 
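# Note: a custom ``cls`` hook is invoked as cls(pipeline_response, None, response_headers) for this operation;
+ # an illustrative sketch such as cls=lambda pr, body, headers: headers would surface the raw response headers.
+ 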
return cls(pipeline_response, None, response_headers) + + set_access_policy.metadata = {'url': '/{shareName}'} # type: ignore + + async def get_statistics( + self, + timeout: Optional[int] = None, + lease_access_conditions: Optional["_models.LeaseAccessConditions"] = None, + **kwargs: Any + ) -> "_models.ShareStats": + """Retrieves statistics related to the share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ShareStats, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareStats + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareStats"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + comp = "stats" + accept = "application/xml" + + # Construct URL + url = self.get_statistics.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ShareStats', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_statistics.metadata = {'url': '/{shareName}'} # type: ignore + + async def 
restore(
+ self,
+ timeout: Optional[int] = None,
+ request_id_parameter: Optional[str] = None,
+ deleted_share_name: Optional[str] = None,
+ deleted_share_version: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """Restores a previously deleted Share.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for File Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param deleted_share_name: Specifies the name of the previously-deleted share.
+ :type deleted_share_name: str
+ :param deleted_share_version: Specifies the version of the previously-deleted share.
+ :type deleted_share_version: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ restype = "share"
+ comp = "undelete"
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.restore.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ if deleted_share_name is not None:
+ header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str')
+ if deleted_share_version is not None:
+ header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise HttpResponseError(response=response, model=error)
+
+ response_headers = {}
+ response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+ response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+ response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+ 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + restore.metadata = {'url': '/{shareName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/__init__.py new file mode 100644 index 0000000..27d6752 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/__init__.py @@ -0,0 +1,127 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +try: + from ._models_py3 import AccessPolicy + from ._models_py3 import ClearRange + from ._models_py3 import CopyFileSmbInfo + from ._models_py3 import CorsRule + from ._models_py3 import DirectoryItem + from ._models_py3 import FileHTTPHeaders + from ._models_py3 import FileItem + from ._models_py3 import FileProperty + from ._models_py3 import FileRange + from ._models_py3 import FilesAndDirectoriesListSegment + from ._models_py3 import HandleItem + from ._models_py3 import LeaseAccessConditions + from ._models_py3 import ListFilesAndDirectoriesSegmentResponse + from ._models_py3 import ListHandlesResponse + from ._models_py3 import ListSharesResponse + from ._models_py3 import Metrics + from ._models_py3 import RetentionPolicy + from ._models_py3 import ShareFileRangeList + from ._models_py3 import ShareItemInternal + from ._models_py3 import SharePermission + from ._models_py3 import SharePropertiesInternal + from ._models_py3 import ShareProtocolSettings + from ._models_py3 import ShareSmbSettings + from ._models_py3 import ShareStats + from ._models_py3 import SignedIdentifier + from ._models_py3 import SmbMultichannel + from ._models_py3 import SourceModifiedAccessConditions + from ._models_py3 import StorageError + from ._models_py3 import StorageServiceProperties +except (SyntaxError, ImportError): + from ._models import AccessPolicy # type: ignore + from ._models import ClearRange # type: ignore + from ._models import CopyFileSmbInfo # type: ignore + from ._models import CorsRule # type: ignore + from ._models import DirectoryItem # type: ignore + from ._models import FileHTTPHeaders # type: ignore + from ._models import FileItem # type: ignore + from ._models import FileProperty # type: ignore + from ._models import FileRange # type: ignore + from ._models import FilesAndDirectoriesListSegment # type: ignore + from ._models import HandleItem # type: ignore + from ._models import LeaseAccessConditions # type: ignore + from ._models import ListFilesAndDirectoriesSegmentResponse # type: ignore + from ._models import ListHandlesResponse # type: ignore + from ._models import ListSharesResponse # type: ignore + from ._models import Metrics # type: ignore + from ._models import RetentionPolicy # type: ignore + from ._models import ShareFileRangeList # type: ignore + from 
._models import ShareItemInternal # type: ignore + from ._models import SharePermission # type: ignore + from ._models import SharePropertiesInternal # type: ignore + from ._models import ShareProtocolSettings # type: ignore + from ._models import ShareSmbSettings # type: ignore + from ._models import ShareStats # type: ignore + from ._models import SignedIdentifier # type: ignore + from ._models import SmbMultichannel # type: ignore + from ._models import SourceModifiedAccessConditions # type: ignore + from ._models import StorageError # type: ignore + from ._models import StorageServiceProperties # type: ignore + +from ._azure_file_storage_enums import ( + CopyStatusType, + DeleteSnapshotsOptionType, + FileRangeWriteType, + LeaseDurationType, + LeaseStateType, + LeaseStatusType, + ListFilesIncludeType, + ListSharesIncludeType, + PermissionCopyModeType, + ShareAccessTier, + ShareRootSquash, + StorageErrorCode, +) + +__all__ = [ + 'AccessPolicy', + 'ClearRange', + 'CopyFileSmbInfo', + 'CorsRule', + 'DirectoryItem', + 'FileHTTPHeaders', + 'FileItem', + 'FileProperty', + 'FileRange', + 'FilesAndDirectoriesListSegment', + 'HandleItem', + 'LeaseAccessConditions', + 'ListFilesAndDirectoriesSegmentResponse', + 'ListHandlesResponse', + 'ListSharesResponse', + 'Metrics', + 'RetentionPolicy', + 'ShareFileRangeList', + 'ShareItemInternal', + 'SharePermission', + 'SharePropertiesInternal', + 'ShareProtocolSettings', + 'ShareSmbSettings', + 'ShareStats', + 'SignedIdentifier', + 'SmbMultichannel', + 'SourceModifiedAccessConditions', + 'StorageError', + 'StorageServiceProperties', + 'CopyStatusType', + 'DeleteSnapshotsOptionType', + 'FileRangeWriteType', + 'LeaseDurationType', + 'LeaseStateType', + 'LeaseStatusType', + 'ListFilesIncludeType', + 'ListSharesIncludeType', + 'PermissionCopyModeType', + 'ShareAccessTier', + 'ShareRootSquash', + 'StorageErrorCode', +] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_azure_file_storage_enums.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_azure_file_storage_enums.py new file mode 100644 index 0000000..1c8b351 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_azure_file_storage_enums.py @@ -0,0 +1,169 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum, EnumMeta +from six import with_metaclass + +class _CaseInsensitiveEnumMeta(EnumMeta): + def __getitem__(self, name): + return super().__getitem__(name.upper()) + + def __getattr__(cls, name): + """Return the enum member matching `name` + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. 
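+
+ Example (illustrative): attribute lookup is case-insensitive, so
+ ``CopyStatusType.pending is CopyStatusType.PENDING`` holds, because the
+ lookup is routed through ``name.upper()``.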
+ """ + try: + return cls._member_map_[name.upper()] + except KeyError: + raise AttributeError(name) + + +class CopyStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + PENDING = "pending" + SUCCESS = "success" + ABORTED = "aborted" + FAILED = "failed" + +class DeleteSnapshotsOptionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + INCLUDE = "include" + INCLUDE_LEASED = "include-leased" + +class FileRangeWriteType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + UPDATE = "update" + CLEAR = "clear" + +class LeaseDurationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """When a share is leased, specifies whether the lease is of infinite or fixed duration. + """ + + INFINITE = "infinite" + FIXED = "fixed" + +class LeaseStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Lease state of the share. + """ + + AVAILABLE = "available" + LEASED = "leased" + EXPIRED = "expired" + BREAKING = "breaking" + BROKEN = "broken" + +class LeaseStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The current lease status of the share. + """ + + LOCKED = "locked" + UNLOCKED = "unlocked" + +class ListFilesIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + TIMESTAMPS = "Timestamps" + ETAG = "Etag" + ATTRIBUTES = "Attributes" + PERMISSION_KEY = "PermissionKey" + +class ListSharesIncludeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + SNAPSHOTS = "snapshots" + METADATA = "metadata" + DELETED = "deleted" + +class PermissionCopyModeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + SOURCE = "source" + OVERRIDE = "override" + +class ShareAccessTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + TRANSACTION_OPTIMIZED = "TransactionOptimized" + HOT = "Hot" + COOL = "Cool" + +class ShareRootSquash(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + + NO_ROOT_SQUASH = "NoRootSquash" + ROOT_SQUASH = "RootSquash" + ALL_SQUASH = "AllSquash" + +class StorageErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Error codes returned by the service + """ + + ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" + ACCOUNT_BEING_CREATED = "AccountBeingCreated" + ACCOUNT_IS_DISABLED = "AccountIsDisabled" + AUTHENTICATION_FAILED = "AuthenticationFailed" + AUTHORIZATION_FAILURE = "AuthorizationFailure" + CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" + CONDITION_NOT_MET = "ConditionNotMet" + EMPTY_METADATA_KEY = "EmptyMetadataKey" + INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" + INTERNAL_ERROR = "InternalError" + INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" + INVALID_HEADER_VALUE = "InvalidHeaderValue" + INVALID_HTTP_VERB = "InvalidHttpVerb" + INVALID_INPUT = "InvalidInput" + INVALID_MD5 = "InvalidMd5" + INVALID_METADATA = "InvalidMetadata" + INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" + INVALID_RANGE = "InvalidRange" + INVALID_RESOURCE_NAME = "InvalidResourceName" + INVALID_URI = "InvalidUri" + INVALID_XML_DOCUMENT = "InvalidXmlDocument" + INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" + MD5_MISMATCH = "Md5Mismatch" + METADATA_TOO_LARGE = "MetadataTooLarge" + MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" + MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" + MISSING_REQUIRED_HEADER = "MissingRequiredHeader" + MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" + MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" + OPERATION_TIMED_OUT = 
"OperationTimedOut" + OUT_OF_RANGE_INPUT = "OutOfRangeInput" + OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" + REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" + RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" + REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" + RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" + RESOURCE_NOT_FOUND = "ResourceNotFound" + SERVER_BUSY = "ServerBusy" + UNSUPPORTED_HEADER = "UnsupportedHeader" + UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" + UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" + UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" + CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory" + CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay" + DELETE_PENDING = "DeletePending" + DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty" + FILE_LOCK_CONFLICT = "FileLockConflict" + INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName" + PARENT_NOT_FOUND = "ParentNotFound" + READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute" + SHARE_ALREADY_EXISTS = "ShareAlreadyExists" + SHARE_BEING_DELETED = "ShareBeingDeleted" + SHARE_DISABLED = "ShareDisabled" + SHARE_NOT_FOUND = "ShareNotFound" + SHARING_VIOLATION = "SharingViolation" + SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress" + SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded" + SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported" + SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots" + CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed" + AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" + AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch" + AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" + AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" + AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" + FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models.py new file mode 100644 index 0000000..024a56b --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models.py @@ -0,0 +1,1115 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + + +class AccessPolicy(msrest.serialization.Model): + """An Access policy. + + :param start: The date-time the policy is active. + :type start: str + :param expiry: The date-time the policy expires. + :type expiry: str + :param permission: The permissions for the ACL policy. 
+ :type permission: str + """ + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str'}, + 'expiry': {'key': 'Expiry', 'type': 'str'}, + 'permission': {'key': 'Permission', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(AccessPolicy, self).__init__(**kwargs) + self.start = kwargs.get('start', None) + self.expiry = kwargs.get('expiry', None) + self.permission = kwargs.get('permission', None) + + +class ClearRange(msrest.serialization.Model): + """ClearRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'ClearRange' + } + + def __init__( + self, + **kwargs + ): + super(ClearRange, self).__init__(**kwargs) + self.start = kwargs['start'] + self.end = kwargs['end'] + + +class CopyFileSmbInfo(msrest.serialization.Model): + """Parameter group. + + :param file_permission_copy_mode: Specifies the option to copy file security descriptor from + source file or to set it using the value which is defined by the header value of + x-ms-file-permission or x-ms-file-permission-key. Possible values include: "source", + "override". + :type file_permission_copy_mode: str or ~azure.storage.fileshare.models.PermissionCopyModeType + :param ignore_read_only: Specifies the option to overwrite the target file if it already exists + and has read-only attribute set. + :type ignore_read_only: bool + :param file_attributes: Specifies either the option to copy file attributes from a source + file(source) to a target file or a list of attributes to set on a target file. + :type file_attributes: str + :param file_creation_time: Specifies either the option to copy file creation time from a source + file(source) to a target file or a time value in ISO 8601 format to set as creation time on a + target file. + :type file_creation_time: str + :param file_last_write_time: Specifies either the option to copy file last write time from a + source file(source) to a target file or a time value in ISO 8601 format to set as last write + time on a target file. + :type file_last_write_time: str + :param set_archive_attribute: Specifies the option to set archive attribute on a target file. + True means archive attribute will be set on a target file despite attribute overrides or a + source file state. 
+ :type set_archive_attribute: bool
+ """
+
+ _attribute_map = {
+ 'file_permission_copy_mode': {'key': 'filePermissionCopyMode', 'type': 'str'},
+ 'ignore_read_only': {'key': 'ignoreReadOnly', 'type': 'bool'},
+ 'file_attributes': {'key': 'fileAttributes', 'type': 'str'},
+ 'file_creation_time': {'key': 'fileCreationTime', 'type': 'str'},
+ 'file_last_write_time': {'key': 'fileLastWriteTime', 'type': 'str'},
+ 'set_archive_attribute': {'key': 'setArchiveAttribute', 'type': 'bool'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(CopyFileSmbInfo, self).__init__(**kwargs)
+ self.file_permission_copy_mode = kwargs.get('file_permission_copy_mode', None)
+ self.ignore_read_only = kwargs.get('ignore_read_only', None)
+ self.file_attributes = kwargs.get('file_attributes', None)
+ self.file_creation_time = kwargs.get('file_creation_time', None)
+ self.file_last_write_time = kwargs.get('file_last_write_time', None)
+ self.set_archive_attribute = kwargs.get('set_archive_attribute', None)
+
+
+class CorsRule(msrest.serialization.Model):
+ """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param allowed_origins: Required. The origin domains that are permitted to make a request
+ against the storage service via CORS. The origin domain is the domain from which the request
+ originates. Note that the origin must be an exact case-sensitive match with the origin that the
+ user agent sends to the service. You can also use the wildcard character '*' to allow all origin
+ domains to make requests via CORS.
+ :type allowed_origins: str
+ :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may
+ use for a CORS request (comma separated).
+ :type allowed_methods: str
+ :param allowed_headers: Required. The request headers that the origin domain may specify on the
+ CORS request.
+ :type allowed_headers: str
+ :param exposed_headers: Required. The response headers that may be sent in the response to the
+ CORS request and exposed by the browser to the request issuer.
+ :type exposed_headers: str
+ :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the
+ preflight OPTIONS request. 
+ :type max_age_in_seconds: int + """ + + _validation = { + 'allowed_origins': {'required': True}, + 'allowed_methods': {'required': True}, + 'allowed_headers': {'required': True}, + 'exposed_headers': {'required': True}, + 'max_age_in_seconds': {'required': True, 'minimum': 0}, + } + + _attribute_map = { + 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, + 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, + 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, + 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, + 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(CorsRule, self).__init__(**kwargs) + self.allowed_origins = kwargs['allowed_origins'] + self.allowed_methods = kwargs['allowed_methods'] + self.allowed_headers = kwargs['allowed_headers'] + self.exposed_headers = kwargs['exposed_headers'] + self.max_age_in_seconds = kwargs['max_age_in_seconds'] + + +class DirectoryItem(msrest.serialization.Model): + """A listed directory item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param file_id: + :type file_id: str + :param properties: File properties. + :type properties: ~azure.storage.fileshare.models.FileProperty + :param attributes: + :type attributes: str + :param permission_key: + :type permission_key: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'file_id': {'key': 'FileId', 'type': 'str'}, + 'properties': {'key': 'Properties', 'type': 'FileProperty'}, + 'attributes': {'key': 'Attributes', 'type': 'str'}, + 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, + } + _xml_map = { + 'name': 'Directory' + } + + def __init__( + self, + **kwargs + ): + super(DirectoryItem, self).__init__(**kwargs) + self.name = kwargs['name'] + self.file_id = kwargs.get('file_id', None) + self.properties = kwargs.get('properties', None) + self.attributes = kwargs.get('attributes', None) + self.permission_key = kwargs.get('permission_key', None) + + +class FileHTTPHeaders(msrest.serialization.Model): + """Parameter group. + + :param file_content_type: Sets the MIME content type of the file. The default type is + 'application/octet-stream'. + :type file_content_type: str + :param file_content_encoding: Specifies which content encodings have been applied to the file. + :type file_content_encoding: str + :param file_content_language: Specifies the natural languages used by this resource. + :type file_content_language: str + :param file_cache_control: Sets the file's cache control. The File service stores this value + but does not use or modify it. + :type file_cache_control: str + :param file_content_md5: Sets the file's MD5 hash. + :type file_content_md5: bytearray + :param file_content_disposition: Sets the file's Content-Disposition header. 
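+ For example (illustrative), ``'attachment; filename="report.csv"'`` asks clients to
+ download the file rather than render it inline.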
+ :type file_content_disposition: str + """ + + _attribute_map = { + 'file_content_type': {'key': 'fileContentType', 'type': 'str'}, + 'file_content_encoding': {'key': 'fileContentEncoding', 'type': 'str'}, + 'file_content_language': {'key': 'fileContentLanguage', 'type': 'str'}, + 'file_cache_control': {'key': 'fileCacheControl', 'type': 'str'}, + 'file_content_md5': {'key': 'fileContentMD5', 'type': 'bytearray'}, + 'file_content_disposition': {'key': 'fileContentDisposition', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(FileHTTPHeaders, self).__init__(**kwargs) + self.file_content_type = kwargs.get('file_content_type', None) + self.file_content_encoding = kwargs.get('file_content_encoding', None) + self.file_content_language = kwargs.get('file_content_language', None) + self.file_cache_control = kwargs.get('file_cache_control', None) + self.file_content_md5 = kwargs.get('file_content_md5', None) + self.file_content_disposition = kwargs.get('file_content_disposition', None) + + +class FileItem(msrest.serialization.Model): + """A listed file item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param file_id: + :type file_id: str + :param properties: Required. File properties. + :type properties: ~azure.storage.fileshare.models.FileProperty + :param attributes: + :type attributes: str + :param permission_key: + :type permission_key: str + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'file_id': {'key': 'FileId', 'type': 'str'}, + 'properties': {'key': 'Properties', 'type': 'FileProperty'}, + 'attributes': {'key': 'Attributes', 'type': 'str'}, + 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, + } + _xml_map = { + 'name': 'File' + } + + def __init__( + self, + **kwargs + ): + super(FileItem, self).__init__(**kwargs) + self.name = kwargs['name'] + self.file_id = kwargs.get('file_id', None) + self.properties = kwargs['properties'] + self.attributes = kwargs.get('attributes', None) + self.permission_key = kwargs.get('permission_key', None) + + +class FileProperty(msrest.serialization.Model): + """File properties. + + All required parameters must be populated in order to send to Azure. + + :param content_length: Required. Content length of the file. This value may not be up-to-date + since an SMB client may have modified the file locally. The value of Content-Length may not + reflect that fact until the handle is closed or the op-lock is broken. To retrieve current + property values, call Get File Properties. 
+ :type content_length: long + :param creation_time: + :type creation_time: ~datetime.datetime + :param last_access_time: + :type last_access_time: ~datetime.datetime + :param last_write_time: + :type last_write_time: ~datetime.datetime + :param change_time: + :type change_time: ~datetime.datetime + :param last_modified: + :type last_modified: ~datetime.datetime + :param etag: + :type etag: str + """ + + _validation = { + 'content_length': {'required': True}, + } + + _attribute_map = { + 'content_length': {'key': 'Content-Length', 'type': 'long'}, + 'creation_time': {'key': 'CreationTime', 'type': 'iso-8601'}, + 'last_access_time': {'key': 'LastAccessTime', 'type': 'iso-8601'}, + 'last_write_time': {'key': 'LastWriteTime', 'type': 'iso-8601'}, + 'change_time': {'key': 'ChangeTime', 'type': 'iso-8601'}, + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(FileProperty, self).__init__(**kwargs) + self.content_length = kwargs['content_length'] + self.creation_time = kwargs.get('creation_time', None) + self.last_access_time = kwargs.get('last_access_time', None) + self.last_write_time = kwargs.get('last_write_time', None) + self.change_time = kwargs.get('change_time', None) + self.last_modified = kwargs.get('last_modified', None) + self.etag = kwargs.get('etag', None) + + +class FileRange(msrest.serialization.Model): + """An Azure Storage file range. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. Start of the range. + :type start: long + :param end: Required. End of the range. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long'}, + 'end': {'key': 'End', 'type': 'long'}, + } + _xml_map = { + 'name': 'Range' + } + + def __init__( + self, + **kwargs + ): + super(FileRange, self).__init__(**kwargs) + self.start = kwargs['start'] + self.end = kwargs['end'] + + +class FilesAndDirectoriesListSegment(msrest.serialization.Model): + """Abstract for entries that can be listed from Directory. + + All required parameters must be populated in order to send to Azure. + + :param directory_items: Required. + :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] + :param file_items: Required. + :type file_items: list[~azure.storage.fileshare.models.FileItem] + """ + + _validation = { + 'directory_items': {'required': True}, + 'file_items': {'required': True}, + } + + _attribute_map = { + 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]'}, + 'file_items': {'key': 'FileItems', 'type': '[FileItem]'}, + } + _xml_map = { + 'name': 'Entries' + } + + def __init__( + self, + **kwargs + ): + super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) + self.directory_items = kwargs['directory_items'] + self.file_items = kwargs['file_items'] + + +class HandleItem(msrest.serialization.Model): + """A listed Azure Storage handle item. + + All required parameters must be populated in order to send to Azure. + + :param handle_id: Required. XSMB service handle ID. + :type handle_id: str + :param path: Required. File or directory name including full path starting from share root. + :type path: str + :param file_id: Required. FileId uniquely identifies the file or directory. + :type file_id: str + :param parent_id: ParentId uniquely identifies the parent directory of the object. 
+ :type parent_id: str + :param session_id: Required. SMB session ID in context of which the file handle was opened. + :type session_id: str + :param client_ip: Required. Client IP that opened the handle. + :type client_ip: str + :param open_time: Required. Time when the session that previously opened the handle has last + been reconnected. (UTC). + :type open_time: ~datetime.datetime + :param last_reconnect_time: Time handle was last connected to (UTC). + :type last_reconnect_time: ~datetime.datetime + """ + + _validation = { + 'handle_id': {'required': True}, + 'path': {'required': True}, + 'file_id': {'required': True}, + 'session_id': {'required': True}, + 'client_ip': {'required': True}, + 'open_time': {'required': True}, + } + + _attribute_map = { + 'handle_id': {'key': 'HandleId', 'type': 'str'}, + 'path': {'key': 'Path', 'type': 'str'}, + 'file_id': {'key': 'FileId', 'type': 'str'}, + 'parent_id': {'key': 'ParentId', 'type': 'str'}, + 'session_id': {'key': 'SessionId', 'type': 'str'}, + 'client_ip': {'key': 'ClientIp', 'type': 'str'}, + 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123'}, + 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123'}, + } + _xml_map = { + 'name': 'Handle' + } + + def __init__( + self, + **kwargs + ): + super(HandleItem, self).__init__(**kwargs) + self.handle_id = kwargs['handle_id'] + self.path = kwargs['path'] + self.file_id = kwargs['file_id'] + self.parent_id = kwargs.get('parent_id', None) + self.session_id = kwargs['session_id'] + self.client_ip = kwargs['client_ip'] + self.open_time = kwargs['open_time'] + self.last_reconnect_time = kwargs.get('last_reconnect_time', None) + + +class LeaseAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param lease_id: If specified, the operation only succeeds if the resource's lease is active + and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': 'leaseId', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = kwargs.get('lease_id', None) + + +class ListFilesAndDirectoriesSegmentResponse(msrest.serialization.Model): + """An enumeration of directories and files. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param share_name: Required. + :type share_name: str + :param share_snapshot: + :type share_snapshot: str + :param directory_path: Required. + :type directory_path: str + :param prefix: Required. + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param segment: Required. Abstract for entries that can be listed from Directory. + :type segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment + :param next_marker: Required. 
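+ When non-empty, this opaque token can be passed as ``marker`` on a subsequent
+ listing call to continue enumeration.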
+ :type next_marker: str + :param directory_id: + :type directory_id: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'share_name': {'required': True}, + 'directory_path': {'required': True}, + 'prefix': {'required': True}, + 'segment': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'attr': True}}, + 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'attr': True}}, + 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + 'directory_id': {'key': 'DirectoryId', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs['service_endpoint'] + self.share_name = kwargs['share_name'] + self.share_snapshot = kwargs.get('share_snapshot', None) + self.directory_path = kwargs['directory_path'] + self.prefix = kwargs['prefix'] + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.segment = kwargs['segment'] + self.next_marker = kwargs['next_marker'] + self.directory_id = kwargs.get('directory_id', None) + + +class ListHandlesResponse(msrest.serialization.Model): + """An enumeration of handles. + + All required parameters must be populated in order to send to Azure. + + :param handle_list: + :type handle_list: list[~azure.storage.fileshare.models.HandleItem] + :param next_marker: Required. + :type next_marker: str + """ + + _validation = { + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'wrapped': True, 'itemsName': 'Handle'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(ListHandlesResponse, self).__init__(**kwargs) + self.handle_list = kwargs.get('handle_list', None) + self.next_marker = kwargs['next_marker'] + + +class ListSharesResponse(msrest.serialization.Model): + """An enumeration of shares. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param share_items: + :type share_items: list[~azure.storage.fileshare.models.ShareItemInternal] + :param next_marker: Required. 
+ :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'share_items': {'key': 'ShareItems', 'type': '[ShareItemInternal]', 'xml': {'name': 'Shares', 'wrapped': True, 'itemsName': 'Share'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + **kwargs + ): + super(ListSharesResponse, self).__init__(**kwargs) + self.service_endpoint = kwargs['service_endpoint'] + self.prefix = kwargs.get('prefix', None) + self.marker = kwargs.get('marker', None) + self.max_results = kwargs.get('max_results', None) + self.share_items = kwargs.get('share_items', None) + self.next_marker = kwargs['next_marker'] + + +class Metrics(msrest.serialization.Model): + """Storage Analytics metrics for file service. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the File service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary statistics for called + API operations. + :type include_apis: bool + :param retention_policy: The retention policy. + :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str'}, + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + **kwargs + ): + super(Metrics, self).__init__(**kwargs) + self.version = kwargs['version'] + self.enabled = kwargs['enabled'] + self.include_apis = kwargs.get('include_apis', None) + self.retention_policy = kwargs.get('retention_policy', None) + + +class RetentionPolicy(msrest.serialization.Model): + """The retention policy. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Indicates whether a retention policy is enabled for the File service. + If false, metrics data is retained, and the user is responsible for deleting it. + :type enabled: bool + :param days: Indicates the number of days that metrics data should be retained. All data older + than this value will be deleted. Metrics data is deleted on a best-effort basis after the + retention period expires. + :type days: int + """ + + _validation = { + 'enabled': {'required': True}, + 'days': {'maximum': 365, 'minimum': 1}, + } + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'days': {'key': 'Days', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(RetentionPolicy, self).__init__(**kwargs) + self.enabled = kwargs['enabled'] + self.days = kwargs.get('days', None) + + +class ShareFileRangeList(msrest.serialization.Model): + """The list of file ranges. 
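+
+ For example (illustrative), a listing with one written range and one cleared range
+ deserializes to ``ranges=[FileRange(start=0, end=511)]`` and
+ ``clear_ranges=[ClearRange(start=512, end=1023)]``.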
+ + :param ranges: + :type ranges: list[~azure.storage.fileshare.models.FileRange] + :param clear_ranges: + :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange] + """ + + _attribute_map = { + 'ranges': {'key': 'Ranges', 'type': '[FileRange]'}, + 'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]'}, + } + + def __init__( + self, + **kwargs + ): + super(ShareFileRangeList, self).__init__(**kwargs) + self.ranges = kwargs.get('ranges', None) + self.clear_ranges = kwargs.get('clear_ranges', None) + + +class ShareItemInternal(msrest.serialization.Model): + """A listed Azure Storage share item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param snapshot: + :type snapshot: str + :param deleted: + :type deleted: bool + :param version: + :type version: str + :param properties: Required. Properties of a share. + :type properties: ~azure.storage.fileshare.models.SharePropertiesInternal + :param metadata: Dictionary of :code:``. + :type metadata: dict[str, str] + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'snapshot': {'key': 'Snapshot', 'type': 'str'}, + 'deleted': {'key': 'Deleted', 'type': 'bool'}, + 'version': {'key': 'Version', 'type': 'str'}, + 'properties': {'key': 'Properties', 'type': 'SharePropertiesInternal'}, + 'metadata': {'key': 'Metadata', 'type': '{str}'}, + } + _xml_map = { + 'name': 'Share' + } + + def __init__( + self, + **kwargs + ): + super(ShareItemInternal, self).__init__(**kwargs) + self.name = kwargs['name'] + self.snapshot = kwargs.get('snapshot', None) + self.deleted = kwargs.get('deleted', None) + self.version = kwargs.get('version', None) + self.properties = kwargs['properties'] + self.metadata = kwargs.get('metadata', None) + + +class SharePermission(msrest.serialization.Model): + """A permission (a security descriptor) at the share level. + + All required parameters must be populated in order to send to Azure. + + :param permission: Required. The permission in the Security Descriptor Definition Language + (SDDL). + :type permission: str + """ + + _validation = { + 'permission': {'required': True}, + } + + _attribute_map = { + 'permission': {'key': 'permission', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SharePermission, self).__init__(**kwargs) + self.permission = kwargs['permission'] + + +class SharePropertiesInternal(msrest.serialization.Model): + """Properties of a share. + + All required parameters must be populated in order to send to Azure. + + :param last_modified: Required. + :type last_modified: ~datetime.datetime + :param etag: Required. + :type etag: str + :param quota: Required. + :type quota: int + :param provisioned_iops: + :type provisioned_iops: int + :param provisioned_ingress_m_bps: + :type provisioned_ingress_m_bps: int + :param provisioned_egress_m_bps: + :type provisioned_egress_m_bps: int + :param next_allowed_quota_downgrade_time: + :type next_allowed_quota_downgrade_time: ~datetime.datetime + :param deleted_time: + :type deleted_time: ~datetime.datetime + :param remaining_retention_days: + :type remaining_retention_days: int + :param access_tier: + :type access_tier: str + :param access_tier_change_time: + :type access_tier_change_time: ~datetime.datetime + :param access_tier_transition_state: + :type access_tier_transition_state: str + :param lease_status: The current lease status of the share. 
Possible values include: "locked", + "unlocked". + :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType + :param lease_state: Lease state of the share. Possible values include: "available", "leased", + "expired", "breaking", "broken". + :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType + :param lease_duration: When a share is leased, specifies whether the lease is of infinite or + fixed duration. Possible values include: "infinite", "fixed". + :type lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType + :param enabled_protocols: + :type enabled_protocols: str + :param root_squash: Possible values include: "NoRootSquash", "RootSquash", "AllSquash". + :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + 'quota': {'required': True}, + } + + _attribute_map = { + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + 'quota': {'key': 'Quota', 'type': 'int'}, + 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int'}, + 'provisioned_ingress_m_bps': {'key': 'ProvisionedIngressMBps', 'type': 'int'}, + 'provisioned_egress_m_bps': {'key': 'ProvisionedEgressMBps', 'type': 'int'}, + 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123'}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, + 'access_tier': {'key': 'AccessTier', 'type': 'str'}, + 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, + 'access_tier_transition_state': {'key': 'AccessTierTransitionState', 'type': 'str'}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, + 'lease_state': {'key': 'LeaseState', 'type': 'str'}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, + 'enabled_protocols': {'key': 'EnabledProtocols', 'type': 'str'}, + 'root_squash': {'key': 'RootSquash', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SharePropertiesInternal, self).__init__(**kwargs) + self.last_modified = kwargs['last_modified'] + self.etag = kwargs['etag'] + self.quota = kwargs['quota'] + self.provisioned_iops = kwargs.get('provisioned_iops', None) + self.provisioned_ingress_m_bps = kwargs.get('provisioned_ingress_m_bps', None) + self.provisioned_egress_m_bps = kwargs.get('provisioned_egress_m_bps', None) + self.next_allowed_quota_downgrade_time = kwargs.get('next_allowed_quota_downgrade_time', None) + self.deleted_time = kwargs.get('deleted_time', None) + self.remaining_retention_days = kwargs.get('remaining_retention_days', None) + self.access_tier = kwargs.get('access_tier', None) + self.access_tier_change_time = kwargs.get('access_tier_change_time', None) + self.access_tier_transition_state = kwargs.get('access_tier_transition_state', None) + self.lease_status = kwargs.get('lease_status', None) + self.lease_state = kwargs.get('lease_state', None) + self.lease_duration = kwargs.get('lease_duration', None) + self.enabled_protocols = kwargs.get('enabled_protocols', None) + self.root_squash = kwargs.get('root_squash', None) + + +class ShareProtocolSettings(msrest.serialization.Model): + """Protocol settings. + + :param smb: Settings for SMB protocol. 
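+ For example (illustrative),
+ ``ShareProtocolSettings(smb=ShareSmbSettings(multichannel=SmbMultichannel(enabled=True)))``
+ enables SMB Multichannel on the share service.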
+ :type smb: ~azure.storage.fileshare.models.ShareSmbSettings + """ + + _attribute_map = { + 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings'}, + } + _xml_map = { + 'name': 'ProtocolSettings' + } + + def __init__( + self, + **kwargs + ): + super(ShareProtocolSettings, self).__init__(**kwargs) + self.smb = kwargs.get('smb', None) + + +class ShareSmbSettings(msrest.serialization.Model): + """Settings for SMB protocol. + + :param multichannel: Settings for SMB Multichannel. + :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel + """ + + _attribute_map = { + 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel'}, + } + _xml_map = { + 'name': 'SMB' + } + + def __init__( + self, + **kwargs + ): + super(ShareSmbSettings, self).__init__(**kwargs) + self.multichannel = kwargs.get('multichannel', None) + + +class ShareStats(msrest.serialization.Model): + """Stats for the share. + + All required parameters must be populated in order to send to Azure. + + :param share_usage_bytes: Required. The approximate size of the data stored in bytes. Note that + this value may not include all recently created or recently resized files. + :type share_usage_bytes: int + """ + + _validation = { + 'share_usage_bytes': {'required': True}, + } + + _attribute_map = { + 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(ShareStats, self).__init__(**kwargs) + self.share_usage_bytes = kwargs['share_usage_bytes'] + + +class SignedIdentifier(msrest.serialization.Model): + """Signed identifier. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A unique id. + :type id: str + :param access_policy: The access policy. + :type access_policy: ~azure.storage.fileshare.models.AccessPolicy + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, + } + + def __init__( + self, + **kwargs + ): + super(SignedIdentifier, self).__init__(**kwargs) + self.id = kwargs['id'] + self.access_policy = kwargs.get('access_policy', None) + + +class SmbMultichannel(msrest.serialization.Model): + """Settings for SMB multichannel. + + :param enabled: If SMB multichannel is enabled. + :type enabled: bool + """ + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + } + _xml_map = { + 'name': 'Multichannel' + } + + def __init__( + self, + **kwargs + ): + super(SmbMultichannel, self).__init__(**kwargs) + self.enabled = kwargs.get('enabled', None) + + +class SourceModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param source_if_match_crc64: Specify the crc64 value to operate only on range with a matching + crc64 checksum. + :type source_if_match_crc64: bytearray + :param source_if_none_match_crc64: Specify the crc64 value to operate only on range without a + matching crc64 checksum. 
+ :type source_if_none_match_crc64: bytearray + """ + + _attribute_map = { + 'source_if_match_crc64': {'key': 'sourceIfMatchCrc64', 'type': 'bytearray'}, + 'source_if_none_match_crc64': {'key': 'sourceIfNoneMatchCrc64', 'type': 'bytearray'}, + } + + def __init__( + self, + **kwargs + ): + super(SourceModifiedAccessConditions, self).__init__(**kwargs) + self.source_if_match_crc64 = kwargs.get('source_if_match_crc64', None) + self.source_if_none_match_crc64 = kwargs.get('source_if_none_match_crc64', None) + + +class StorageError(msrest.serialization.Model): + """StorageError. + + :param message: + :type message: str + """ + + _attribute_map = { + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageError, self).__init__(**kwargs) + self.message = kwargs.get('message', None) + + +class StorageServiceProperties(msrest.serialization.Model): + """Storage service properties. + + :param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for + files. + :type hour_metrics: ~azure.storage.fileshare.models.Metrics + :param minute_metrics: A summary of request statistics grouped by API in minute aggregates for + files. + :type minute_metrics: ~azure.storage.fileshare.models.Metrics + :param cors: The set of CORS rules. + :type cors: list[~azure.storage.fileshare.models.CorsRule] + :param protocol: Protocol settings. + :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings + """ + + _attribute_map = { + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, + 'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageServiceProperties, self).__init__(**kwargs) + self.hour_metrics = kwargs.get('hour_metrics', None) + self.minute_metrics = kwargs.get('minute_metrics', None) + self.cors = kwargs.get('cors', None) + self.protocol = kwargs.get('protocol', None) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models_py3.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models_py3.py new file mode 100644 index 0000000..c95e0af --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/models/_models_py3.py @@ -0,0 +1,1264 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import Dict, List, Optional, Union + +from azure.core.exceptions import HttpResponseError +import msrest.serialization + +from ._azure_file_storage_enums import * + + +class AccessPolicy(msrest.serialization.Model): + """An Access policy. + + :param start: The date-time the policy is active. + :type start: str + :param expiry: The date-time the policy expires. + :type expiry: str + :param permission: The permissions for the ACL policy. 
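+     An illustrative policy; the permission letters follow the service's share
+     SAS conventions (e.g. ``rcwd`` for read/create/write/delete)::
+
+         policy = AccessPolicy(start='2021-01-01T00:00:00Z',
+                               expiry='2021-02-01T00:00:00Z', permission='rcwd')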
+ :type permission: str + """ + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'str'}, + 'expiry': {'key': 'Expiry', 'type': 'str'}, + 'permission': {'key': 'Permission', 'type': 'str'}, + } + + def __init__( + self, + *, + start: Optional[str] = None, + expiry: Optional[str] = None, + permission: Optional[str] = None, + **kwargs + ): + super(AccessPolicy, self).__init__(**kwargs) + self.start = start + self.expiry = expiry + self.permission = permission + + +class ClearRange(msrest.serialization.Model): + """ClearRange. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. + :type start: long + :param end: Required. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long', 'xml': {'name': 'Start'}}, + 'end': {'key': 'End', 'type': 'long', 'xml': {'name': 'End'}}, + } + _xml_map = { + 'name': 'ClearRange' + } + + def __init__( + self, + *, + start: int, + end: int, + **kwargs + ): + super(ClearRange, self).__init__(**kwargs) + self.start = start + self.end = end + + +class CopyFileSmbInfo(msrest.serialization.Model): + """Parameter group. + + :param file_permission_copy_mode: Specifies the option to copy file security descriptor from + source file or to set it using the value which is defined by the header value of + x-ms-file-permission or x-ms-file-permission-key. Possible values include: "source", + "override". + :type file_permission_copy_mode: str or ~azure.storage.fileshare.models.PermissionCopyModeType + :param ignore_read_only: Specifies the option to overwrite the target file if it already exists + and has read-only attribute set. + :type ignore_read_only: bool + :param file_attributes: Specifies either the option to copy file attributes from a source + file(source) to a target file or a list of attributes to set on a target file. + :type file_attributes: str + :param file_creation_time: Specifies either the option to copy file creation time from a source + file(source) to a target file or a time value in ISO 8601 format to set as creation time on a + target file. + :type file_creation_time: str + :param file_last_write_time: Specifies either the option to copy file last write time from a + source file(source) to a target file or a time value in ISO 8601 format to set as last write + time on a target file. + :type file_last_write_time: str + :param set_archive_attribute: Specifies the option to set archive attribute on a target file. + True means archive attribute will be set on a target file despite attribute overrides or a + source file state. 
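+     An illustrative parameter group built from the options documented above::
+
+         smb_info = CopyFileSmbInfo(file_permission_copy_mode='source',
+                                    ignore_read_only=True, set_archive_attribute=True)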
+    :type set_archive_attribute: bool
+    """
+
+    _attribute_map = {
+        'file_permission_copy_mode': {'key': 'filePermissionCopyMode', 'type': 'str'},
+        'ignore_read_only': {'key': 'ignoreReadOnly', 'type': 'bool'},
+        'file_attributes': {'key': 'fileAttributes', 'type': 'str'},
+        'file_creation_time': {'key': 'fileCreationTime', 'type': 'str'},
+        'file_last_write_time': {'key': 'fileLastWriteTime', 'type': 'str'},
+        'set_archive_attribute': {'key': 'setArchiveAttribute', 'type': 'bool'},
+    }
+
+    def __init__(
+        self,
+        *,
+        file_permission_copy_mode: Optional[Union[str, "PermissionCopyModeType"]] = None,
+        ignore_read_only: Optional[bool] = None,
+        file_attributes: Optional[str] = None,
+        file_creation_time: Optional[str] = None,
+        file_last_write_time: Optional[str] = None,
+        set_archive_attribute: Optional[bool] = None,
+        **kwargs
+    ):
+        super(CopyFileSmbInfo, self).__init__(**kwargs)
+        self.file_permission_copy_mode = file_permission_copy_mode
+        self.ignore_read_only = ignore_read_only
+        self.file_attributes = file_attributes
+        self.file_creation_time = file_creation_time
+        self.file_last_write_time = file_last_write_time
+        self.set_archive_attribute = set_archive_attribute
+
+
+class CorsRule(msrest.serialization.Model):
+    """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param allowed_origins: Required. The origin domains that are permitted to make a request
+     against the storage service via CORS. The origin domain is the domain from which the request
+     originates. Note that the origin must be an exact case-sensitive match with the origin that the
+     user agent sends to the service. You can also use the wildcard character '*' to allow all origin
+     domains to make requests via CORS.
+    :type allowed_origins: str
+    :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may
+     use for a CORS request. (comma separated).
+    :type allowed_methods: str
+    :param allowed_headers: Required. The request headers that the origin domain may specify on the
+     CORS request.
+    :type allowed_headers: str
+    :param exposed_headers: Required. The response headers that may be sent in the response to the
+     CORS request and exposed by the browser to the request issuer.
+    :type exposed_headers: str
+    :param max_age_in_seconds: Required. The maximum amount of time that a browser should cache the
+     preflight OPTIONS request. 
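+     For example, an illustrative rule that admits GET requests from any
+     origin and caches the preflight response for one hour::
+
+         rule = CorsRule(allowed_origins='*', allowed_methods='GET',
+                         allowed_headers='', exposed_headers='',
+                         max_age_in_seconds=3600)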
+ :type max_age_in_seconds: int + """ + + _validation = { + 'allowed_origins': {'required': True}, + 'allowed_methods': {'required': True}, + 'allowed_headers': {'required': True}, + 'exposed_headers': {'required': True}, + 'max_age_in_seconds': {'required': True, 'minimum': 0}, + } + + _attribute_map = { + 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, + 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, + 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, + 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, + 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, + } + + def __init__( + self, + *, + allowed_origins: str, + allowed_methods: str, + allowed_headers: str, + exposed_headers: str, + max_age_in_seconds: int, + **kwargs + ): + super(CorsRule, self).__init__(**kwargs) + self.allowed_origins = allowed_origins + self.allowed_methods = allowed_methods + self.allowed_headers = allowed_headers + self.exposed_headers = exposed_headers + self.max_age_in_seconds = max_age_in_seconds + + +class DirectoryItem(msrest.serialization.Model): + """A listed directory item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param file_id: + :type file_id: str + :param properties: File properties. + :type properties: ~azure.storage.fileshare.models.FileProperty + :param attributes: + :type attributes: str + :param permission_key: + :type permission_key: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'file_id': {'key': 'FileId', 'type': 'str'}, + 'properties': {'key': 'Properties', 'type': 'FileProperty'}, + 'attributes': {'key': 'Attributes', 'type': 'str'}, + 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, + } + _xml_map = { + 'name': 'Directory' + } + + def __init__( + self, + *, + name: str, + file_id: Optional[str] = None, + properties: Optional["FileProperty"] = None, + attributes: Optional[str] = None, + permission_key: Optional[str] = None, + **kwargs + ): + super(DirectoryItem, self).__init__(**kwargs) + self.name = name + self.file_id = file_id + self.properties = properties + self.attributes = attributes + self.permission_key = permission_key + + +class FileHTTPHeaders(msrest.serialization.Model): + """Parameter group. + + :param file_content_type: Sets the MIME content type of the file. The default type is + 'application/octet-stream'. + :type file_content_type: str + :param file_content_encoding: Specifies which content encodings have been applied to the file. + :type file_content_encoding: str + :param file_content_language: Specifies the natural languages used by this resource. + :type file_content_language: str + :param file_cache_control: Sets the file's cache control. The File service stores this value + but does not use or modify it. + :type file_cache_control: str + :param file_content_md5: Sets the file's MD5 hash. + :type file_content_md5: bytearray + :param file_content_disposition: Sets the file's Content-Disposition header. 
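+     An illustrative header group for a plain-text download::
+
+         headers = FileHTTPHeaders(file_content_type='text/plain',
+                                   file_content_disposition='attachment; filename="log.txt"')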
+ :type file_content_disposition: str + """ + + _attribute_map = { + 'file_content_type': {'key': 'fileContentType', 'type': 'str'}, + 'file_content_encoding': {'key': 'fileContentEncoding', 'type': 'str'}, + 'file_content_language': {'key': 'fileContentLanguage', 'type': 'str'}, + 'file_cache_control': {'key': 'fileCacheControl', 'type': 'str'}, + 'file_content_md5': {'key': 'fileContentMD5', 'type': 'bytearray'}, + 'file_content_disposition': {'key': 'fileContentDisposition', 'type': 'str'}, + } + + def __init__( + self, + *, + file_content_type: Optional[str] = None, + file_content_encoding: Optional[str] = None, + file_content_language: Optional[str] = None, + file_cache_control: Optional[str] = None, + file_content_md5: Optional[bytearray] = None, + file_content_disposition: Optional[str] = None, + **kwargs + ): + super(FileHTTPHeaders, self).__init__(**kwargs) + self.file_content_type = file_content_type + self.file_content_encoding = file_content_encoding + self.file_content_language = file_content_language + self.file_cache_control = file_cache_control + self.file_content_md5 = file_content_md5 + self.file_content_disposition = file_content_disposition + + +class FileItem(msrest.serialization.Model): + """A listed file item. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param file_id: + :type file_id: str + :param properties: Required. File properties. + :type properties: ~azure.storage.fileshare.models.FileProperty + :param attributes: + :type attributes: str + :param permission_key: + :type permission_key: str + """ + + _validation = { + 'name': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'file_id': {'key': 'FileId', 'type': 'str'}, + 'properties': {'key': 'Properties', 'type': 'FileProperty'}, + 'attributes': {'key': 'Attributes', 'type': 'str'}, + 'permission_key': {'key': 'PermissionKey', 'type': 'str'}, + } + _xml_map = { + 'name': 'File' + } + + def __init__( + self, + *, + name: str, + properties: "FileProperty", + file_id: Optional[str] = None, + attributes: Optional[str] = None, + permission_key: Optional[str] = None, + **kwargs + ): + super(FileItem, self).__init__(**kwargs) + self.name = name + self.file_id = file_id + self.properties = properties + self.attributes = attributes + self.permission_key = permission_key + + +class FileProperty(msrest.serialization.Model): + """File properties. + + All required parameters must be populated in order to send to Azure. + + :param content_length: Required. Content length of the file. This value may not be up-to-date + since an SMB client may have modified the file locally. The value of Content-Length may not + reflect that fact until the handle is closed or the op-lock is broken. To retrieve current + property values, call Get File Properties. 
+ :type content_length: long + :param creation_time: + :type creation_time: ~datetime.datetime + :param last_access_time: + :type last_access_time: ~datetime.datetime + :param last_write_time: + :type last_write_time: ~datetime.datetime + :param change_time: + :type change_time: ~datetime.datetime + :param last_modified: + :type last_modified: ~datetime.datetime + :param etag: + :type etag: str + """ + + _validation = { + 'content_length': {'required': True}, + } + + _attribute_map = { + 'content_length': {'key': 'Content-Length', 'type': 'long'}, + 'creation_time': {'key': 'CreationTime', 'type': 'iso-8601'}, + 'last_access_time': {'key': 'LastAccessTime', 'type': 'iso-8601'}, + 'last_write_time': {'key': 'LastWriteTime', 'type': 'iso-8601'}, + 'change_time': {'key': 'ChangeTime', 'type': 'iso-8601'}, + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + } + + def __init__( + self, + *, + content_length: int, + creation_time: Optional[datetime.datetime] = None, + last_access_time: Optional[datetime.datetime] = None, + last_write_time: Optional[datetime.datetime] = None, + change_time: Optional[datetime.datetime] = None, + last_modified: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + **kwargs + ): + super(FileProperty, self).__init__(**kwargs) + self.content_length = content_length + self.creation_time = creation_time + self.last_access_time = last_access_time + self.last_write_time = last_write_time + self.change_time = change_time + self.last_modified = last_modified + self.etag = etag + + +class FileRange(msrest.serialization.Model): + """An Azure Storage file range. + + All required parameters must be populated in order to send to Azure. + + :param start: Required. Start of the range. + :type start: long + :param end: Required. End of the range. + :type end: long + """ + + _validation = { + 'start': {'required': True}, + 'end': {'required': True}, + } + + _attribute_map = { + 'start': {'key': 'Start', 'type': 'long'}, + 'end': {'key': 'End', 'type': 'long'}, + } + _xml_map = { + 'name': 'Range' + } + + def __init__( + self, + *, + start: int, + end: int, + **kwargs + ): + super(FileRange, self).__init__(**kwargs) + self.start = start + self.end = end + + +class FilesAndDirectoriesListSegment(msrest.serialization.Model): + """Abstract for entries that can be listed from Directory. + + All required parameters must be populated in order to send to Azure. + + :param directory_items: Required. + :type directory_items: list[~azure.storage.fileshare.models.DirectoryItem] + :param file_items: Required. + :type file_items: list[~azure.storage.fileshare.models.FileItem] + """ + + _validation = { + 'directory_items': {'required': True}, + 'file_items': {'required': True}, + } + + _attribute_map = { + 'directory_items': {'key': 'DirectoryItems', 'type': '[DirectoryItem]'}, + 'file_items': {'key': 'FileItems', 'type': '[FileItem]'}, + } + _xml_map = { + 'name': 'Entries' + } + + def __init__( + self, + *, + directory_items: List["DirectoryItem"], + file_items: List["FileItem"], + **kwargs + ): + super(FilesAndDirectoriesListSegment, self).__init__(**kwargs) + self.directory_items = directory_items + self.file_items = file_items + + +class HandleItem(msrest.serialization.Model): + """A listed Azure Storage handle item. + + All required parameters must be populated in order to send to Azure. + + :param handle_id: Required. XSMB service handle ID. + :type handle_id: str + :param path: Required. 
File or directory name including full path starting from share root. + :type path: str + :param file_id: Required. FileId uniquely identifies the file or directory. + :type file_id: str + :param parent_id: ParentId uniquely identifies the parent directory of the object. + :type parent_id: str + :param session_id: Required. SMB session ID in context of which the file handle was opened. + :type session_id: str + :param client_ip: Required. Client IP that opened the handle. + :type client_ip: str + :param open_time: Required. Time when the session that previously opened the handle has last + been reconnected. (UTC). + :type open_time: ~datetime.datetime + :param last_reconnect_time: Time handle was last connected to (UTC). + :type last_reconnect_time: ~datetime.datetime + """ + + _validation = { + 'handle_id': {'required': True}, + 'path': {'required': True}, + 'file_id': {'required': True}, + 'session_id': {'required': True}, + 'client_ip': {'required': True}, + 'open_time': {'required': True}, + } + + _attribute_map = { + 'handle_id': {'key': 'HandleId', 'type': 'str'}, + 'path': {'key': 'Path', 'type': 'str'}, + 'file_id': {'key': 'FileId', 'type': 'str'}, + 'parent_id': {'key': 'ParentId', 'type': 'str'}, + 'session_id': {'key': 'SessionId', 'type': 'str'}, + 'client_ip': {'key': 'ClientIp', 'type': 'str'}, + 'open_time': {'key': 'OpenTime', 'type': 'rfc-1123'}, + 'last_reconnect_time': {'key': 'LastReconnectTime', 'type': 'rfc-1123'}, + } + _xml_map = { + 'name': 'Handle' + } + + def __init__( + self, + *, + handle_id: str, + path: str, + file_id: str, + session_id: str, + client_ip: str, + open_time: datetime.datetime, + parent_id: Optional[str] = None, + last_reconnect_time: Optional[datetime.datetime] = None, + **kwargs + ): + super(HandleItem, self).__init__(**kwargs) + self.handle_id = handle_id + self.path = path + self.file_id = file_id + self.parent_id = parent_id + self.session_id = session_id + self.client_ip = client_ip + self.open_time = open_time + self.last_reconnect_time = last_reconnect_time + + +class LeaseAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param lease_id: If specified, the operation only succeeds if the resource's lease is active + and matches this ID. + :type lease_id: str + """ + + _attribute_map = { + 'lease_id': {'key': 'leaseId', 'type': 'str'}, + } + + def __init__( + self, + *, + lease_id: Optional[str] = None, + **kwargs + ): + super(LeaseAccessConditions, self).__init__(**kwargs) + self.lease_id = lease_id + + +class ListFilesAndDirectoriesSegmentResponse(msrest.serialization.Model): + """An enumeration of directories and files. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param share_name: Required. + :type share_name: str + :param share_snapshot: + :type share_snapshot: str + :param directory_path: Required. + :type directory_path: str + :param prefix: Required. + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param segment: Required. Abstract for entries that can be listed from Directory. + :type segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment + :param next_marker: Required. 
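+     When non-empty, pass this value as ``marker`` on a subsequent listing call
+     to continue the enumeration; an empty next marker indicates the listing is
+     complete.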
+ :type next_marker: str + :param directory_id: + :type directory_id: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'share_name': {'required': True}, + 'directory_path': {'required': True}, + 'prefix': {'required': True}, + 'segment': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'share_name': {'key': 'ShareName', 'type': 'str', 'xml': {'attr': True}}, + 'share_snapshot': {'key': 'ShareSnapshot', 'type': 'str', 'xml': {'attr': True}}, + 'directory_path': {'key': 'DirectoryPath', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'segment': {'key': 'Segment', 'type': 'FilesAndDirectoriesListSegment'}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + 'directory_id': {'key': 'DirectoryId', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + service_endpoint: str, + share_name: str, + directory_path: str, + prefix: str, + segment: "FilesAndDirectoriesListSegment", + next_marker: str, + share_snapshot: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + directory_id: Optional[str] = None, + **kwargs + ): + super(ListFilesAndDirectoriesSegmentResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.share_name = share_name + self.share_snapshot = share_snapshot + self.directory_path = directory_path + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.segment = segment + self.next_marker = next_marker + self.directory_id = directory_id + + +class ListHandlesResponse(msrest.serialization.Model): + """An enumeration of handles. + + All required parameters must be populated in order to send to Azure. + + :param handle_list: + :type handle_list: list[~azure.storage.fileshare.models.HandleItem] + :param next_marker: Required. + :type next_marker: str + """ + + _validation = { + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'handle_list': {'key': 'HandleList', 'type': '[HandleItem]', 'xml': {'name': 'Entries', 'wrapped': True, 'itemsName': 'Handle'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + next_marker: str, + handle_list: Optional[List["HandleItem"]] = None, + **kwargs + ): + super(ListHandlesResponse, self).__init__(**kwargs) + self.handle_list = handle_list + self.next_marker = next_marker + + +class ListSharesResponse(msrest.serialization.Model): + """An enumeration of shares. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param prefix: + :type prefix: str + :param marker: + :type marker: str + :param max_results: + :type max_results: int + :param share_items: + :type share_items: list[~azure.storage.fileshare.models.ShareItemInternal] + :param next_marker: Required. 
+ :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'next_marker': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'share_items': {'key': 'ShareItems', 'type': '[ShareItemInternal]', 'xml': {'name': 'Shares', 'wrapped': True, 'itemsName': 'Share'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__( + self, + *, + service_endpoint: str, + next_marker: str, + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + share_items: Optional[List["ShareItemInternal"]] = None, + **kwargs + ): + super(ListSharesResponse, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.share_items = share_items + self.next_marker = next_marker + + +class Metrics(msrest.serialization.Model): + """Storage Analytics metrics for file service. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The version of Storage Analytics to configure. + :type version: str + :param enabled: Required. Indicates whether metrics are enabled for the File service. + :type enabled: bool + :param include_apis: Indicates whether metrics should generate summary statistics for called + API operations. + :type include_apis: bool + :param retention_policy: The retention policy. + :type retention_policy: ~azure.storage.fileshare.models.RetentionPolicy + """ + + _validation = { + 'version': {'required': True}, + 'enabled': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str'}, + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + *, + version: str, + enabled: bool, + include_apis: Optional[bool] = None, + retention_policy: Optional["RetentionPolicy"] = None, + **kwargs + ): + super(Metrics, self).__init__(**kwargs) + self.version = version + self.enabled = enabled + self.include_apis = include_apis + self.retention_policy = retention_policy + + +class RetentionPolicy(msrest.serialization.Model): + """The retention policy. + + All required parameters must be populated in order to send to Azure. + + :param enabled: Required. Indicates whether a retention policy is enabled for the File service. + If false, metrics data is retained, and the user is responsible for deleting it. + :type enabled: bool + :param days: Indicates the number of days that metrics data should be retained. All data older + than this value will be deleted. Metrics data is deleted on a best-effort basis after the + retention period expires. 
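+     For example, ``RetentionPolicy(enabled=True, days=7)`` retains metrics
+     data for seven days; this model's validation restricts ``days`` to 1-365.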
+    :type days: int
+    """
+
+    _validation = {
+        'enabled': {'required': True},
+        'days': {'maximum': 365, 'minimum': 1},
+    }
+
+    _attribute_map = {
+        'enabled': {'key': 'Enabled', 'type': 'bool'},
+        'days': {'key': 'Days', 'type': 'int'},
+    }
+
+    def __init__(
+        self,
+        *,
+        enabled: bool,
+        days: Optional[int] = None,
+        **kwargs
+    ):
+        super(RetentionPolicy, self).__init__(**kwargs)
+        self.enabled = enabled
+        self.days = days
+
+
+class ShareFileRangeList(msrest.serialization.Model):
+    """The list of file ranges.
+
+    :param ranges:
+    :type ranges: list[~azure.storage.fileshare.models.FileRange]
+    :param clear_ranges:
+    :type clear_ranges: list[~azure.storage.fileshare.models.ClearRange]
+    """
+
+    _attribute_map = {
+        'ranges': {'key': 'Ranges', 'type': '[FileRange]'},
+        'clear_ranges': {'key': 'ClearRanges', 'type': '[ClearRange]'},
+    }
+
+    def __init__(
+        self,
+        *,
+        ranges: Optional[List["FileRange"]] = None,
+        clear_ranges: Optional[List["ClearRange"]] = None,
+        **kwargs
+    ):
+        super(ShareFileRangeList, self).__init__(**kwargs)
+        self.ranges = ranges
+        self.clear_ranges = clear_ranges
+
+
+class ShareItemInternal(msrest.serialization.Model):
+    """A listed Azure Storage share item.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param name: Required.
+    :type name: str
+    :param snapshot:
+    :type snapshot: str
+    :param deleted:
+    :type deleted: bool
+    :param version:
+    :type version: str
+    :param properties: Required. Properties of a share.
+    :type properties: ~azure.storage.fileshare.models.SharePropertiesInternal
+    :param metadata: Dictionary of :code:`<string>`.
+    :type metadata: dict[str, str]
+    """
+
+    _validation = {
+        'name': {'required': True},
+        'properties': {'required': True},
+    }
+
+    _attribute_map = {
+        'name': {'key': 'Name', 'type': 'str'},
+        'snapshot': {'key': 'Snapshot', 'type': 'str'},
+        'deleted': {'key': 'Deleted', 'type': 'bool'},
+        'version': {'key': 'Version', 'type': 'str'},
+        'properties': {'key': 'Properties', 'type': 'SharePropertiesInternal'},
+        'metadata': {'key': 'Metadata', 'type': '{str}'},
+    }
+    _xml_map = {
+        'name': 'Share'
+    }
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        properties: "SharePropertiesInternal",
+        snapshot: Optional[str] = None,
+        deleted: Optional[bool] = None,
+        version: Optional[str] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs
+    ):
+        super(ShareItemInternal, self).__init__(**kwargs)
+        self.name = name
+        self.snapshot = snapshot
+        self.deleted = deleted
+        self.version = version
+        self.properties = properties
+        self.metadata = metadata
+
+
+class SharePermission(msrest.serialization.Model):
+    """A permission (a security descriptor) at the share level.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param permission: Required. The permission in the Security Descriptor Definition Language
+     (SDDL).
+    :type permission: str
+    """
+
+    _validation = {
+        'permission': {'required': True},
+    }
+
+    _attribute_map = {
+        'permission': {'key': 'permission', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        *,
+        permission: str,
+        **kwargs
+    ):
+        super(SharePermission, self).__init__(**kwargs)
+        self.permission = permission
+
+
+class SharePropertiesInternal(msrest.serialization.Model):
+    """Properties of a share.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param last_modified: Required.
+    :type last_modified: ~datetime.datetime
+    :param etag: Required.
+    :type etag: str
+    :param quota: Required. 
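+     The quota is the maximum size of the share, expressed in GiB.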
+ :type quota: int + :param provisioned_iops: + :type provisioned_iops: int + :param provisioned_ingress_m_bps: + :type provisioned_ingress_m_bps: int + :param provisioned_egress_m_bps: + :type provisioned_egress_m_bps: int + :param next_allowed_quota_downgrade_time: + :type next_allowed_quota_downgrade_time: ~datetime.datetime + :param deleted_time: + :type deleted_time: ~datetime.datetime + :param remaining_retention_days: + :type remaining_retention_days: int + :param access_tier: + :type access_tier: str + :param access_tier_change_time: + :type access_tier_change_time: ~datetime.datetime + :param access_tier_transition_state: + :type access_tier_transition_state: str + :param lease_status: The current lease status of the share. Possible values include: "locked", + "unlocked". + :type lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType + :param lease_state: Lease state of the share. Possible values include: "available", "leased", + "expired", "breaking", "broken". + :type lease_state: str or ~azure.storage.fileshare.models.LeaseStateType + :param lease_duration: When a share is leased, specifies whether the lease is of infinite or + fixed duration. Possible values include: "infinite", "fixed". + :type lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType + :param enabled_protocols: + :type enabled_protocols: str + :param root_squash: Possible values include: "NoRootSquash", "RootSquash", "AllSquash". + :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + """ + + _validation = { + 'last_modified': {'required': True}, + 'etag': {'required': True}, + 'quota': {'required': True}, + } + + _attribute_map = { + 'last_modified': {'key': 'Last-Modified', 'type': 'rfc-1123'}, + 'etag': {'key': 'Etag', 'type': 'str'}, + 'quota': {'key': 'Quota', 'type': 'int'}, + 'provisioned_iops': {'key': 'ProvisionedIops', 'type': 'int'}, + 'provisioned_ingress_m_bps': {'key': 'ProvisionedIngressMBps', 'type': 'int'}, + 'provisioned_egress_m_bps': {'key': 'ProvisionedEgressMBps', 'type': 'int'}, + 'next_allowed_quota_downgrade_time': {'key': 'NextAllowedQuotaDowngradeTime', 'type': 'rfc-1123'}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123'}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int'}, + 'access_tier': {'key': 'AccessTier', 'type': 'str'}, + 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123'}, + 'access_tier_transition_state': {'key': 'AccessTierTransitionState', 'type': 'str'}, + 'lease_status': {'key': 'LeaseStatus', 'type': 'str'}, + 'lease_state': {'key': 'LeaseState', 'type': 'str'}, + 'lease_duration': {'key': 'LeaseDuration', 'type': 'str'}, + 'enabled_protocols': {'key': 'EnabledProtocols', 'type': 'str'}, + 'root_squash': {'key': 'RootSquash', 'type': 'str'}, + } + + def __init__( + self, + *, + last_modified: datetime.datetime, + etag: str, + quota: int, + provisioned_iops: Optional[int] = None, + provisioned_ingress_m_bps: Optional[int] = None, + provisioned_egress_m_bps: Optional[int] = None, + next_allowed_quota_downgrade_time: Optional[datetime.datetime] = None, + deleted_time: Optional[datetime.datetime] = None, + remaining_retention_days: Optional[int] = None, + access_tier: Optional[str] = None, + access_tier_change_time: Optional[datetime.datetime] = None, + access_tier_transition_state: Optional[str] = None, + lease_status: Optional[Union[str, "LeaseStatusType"]] = None, + lease_state: Optional[Union[str, "LeaseStateType"]] = None, + lease_duration: 
Optional[Union[str, "LeaseDurationType"]] = None, + enabled_protocols: Optional[str] = None, + root_squash: Optional[Union[str, "ShareRootSquash"]] = None, + **kwargs + ): + super(SharePropertiesInternal, self).__init__(**kwargs) + self.last_modified = last_modified + self.etag = etag + self.quota = quota + self.provisioned_iops = provisioned_iops + self.provisioned_ingress_m_bps = provisioned_ingress_m_bps + self.provisioned_egress_m_bps = provisioned_egress_m_bps + self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time + self.deleted_time = deleted_time + self.remaining_retention_days = remaining_retention_days + self.access_tier = access_tier + self.access_tier_change_time = access_tier_change_time + self.access_tier_transition_state = access_tier_transition_state + self.lease_status = lease_status + self.lease_state = lease_state + self.lease_duration = lease_duration + self.enabled_protocols = enabled_protocols + self.root_squash = root_squash + + +class ShareProtocolSettings(msrest.serialization.Model): + """Protocol settings. + + :param smb: Settings for SMB protocol. + :type smb: ~azure.storage.fileshare.models.ShareSmbSettings + """ + + _attribute_map = { + 'smb': {'key': 'Smb', 'type': 'ShareSmbSettings'}, + } + _xml_map = { + 'name': 'ProtocolSettings' + } + + def __init__( + self, + *, + smb: Optional["ShareSmbSettings"] = None, + **kwargs + ): + super(ShareProtocolSettings, self).__init__(**kwargs) + self.smb = smb + + +class ShareSmbSettings(msrest.serialization.Model): + """Settings for SMB protocol. + + :param multichannel: Settings for SMB Multichannel. + :type multichannel: ~azure.storage.fileshare.models.SmbMultichannel + """ + + _attribute_map = { + 'multichannel': {'key': 'Multichannel', 'type': 'SmbMultichannel'}, + } + _xml_map = { + 'name': 'SMB' + } + + def __init__( + self, + *, + multichannel: Optional["SmbMultichannel"] = None, + **kwargs + ): + super(ShareSmbSettings, self).__init__(**kwargs) + self.multichannel = multichannel + + +class ShareStats(msrest.serialization.Model): + """Stats for the share. + + All required parameters must be populated in order to send to Azure. + + :param share_usage_bytes: Required. The approximate size of the data stored in bytes. Note that + this value may not include all recently created or recently resized files. + :type share_usage_bytes: int + """ + + _validation = { + 'share_usage_bytes': {'required': True}, + } + + _attribute_map = { + 'share_usage_bytes': {'key': 'ShareUsageBytes', 'type': 'int'}, + } + + def __init__( + self, + *, + share_usage_bytes: int, + **kwargs + ): + super(ShareStats, self).__init__(**kwargs) + self.share_usage_bytes = share_usage_bytes + + +class SignedIdentifier(msrest.serialization.Model): + """Signed identifier. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. A unique id. + :type id: str + :param access_policy: The access policy. + :type access_policy: ~azure.storage.fileshare.models.AccessPolicy + """ + + _validation = { + 'id': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, + } + + def __init__( + self, + *, + id: str, + access_policy: Optional["AccessPolicy"] = None, + **kwargs + ): + super(SignedIdentifier, self).__init__(**kwargs) + self.id = id + self.access_policy = access_policy + + +class SmbMultichannel(msrest.serialization.Model): + """Settings for SMB multichannel. 
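+
+    A minimal construction sketch (in normal use these generated models are
+    built for you by the client layer when service properties are read or
+    written)::
+
+        multichannel = SmbMultichannel(enabled=True)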
+ + :param enabled: If SMB multichannel is enabled. + :type enabled: bool + """ + + _attribute_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + } + _xml_map = { + 'name': 'Multichannel' + } + + def __init__( + self, + *, + enabled: Optional[bool] = None, + **kwargs + ): + super(SmbMultichannel, self).__init__(**kwargs) + self.enabled = enabled + + +class SourceModifiedAccessConditions(msrest.serialization.Model): + """Parameter group. + + :param source_if_match_crc64: Specify the crc64 value to operate only on range with a matching + crc64 checksum. + :type source_if_match_crc64: bytearray + :param source_if_none_match_crc64: Specify the crc64 value to operate only on range without a + matching crc64 checksum. + :type source_if_none_match_crc64: bytearray + """ + + _attribute_map = { + 'source_if_match_crc64': {'key': 'sourceIfMatchCrc64', 'type': 'bytearray'}, + 'source_if_none_match_crc64': {'key': 'sourceIfNoneMatchCrc64', 'type': 'bytearray'}, + } + + def __init__( + self, + *, + source_if_match_crc64: Optional[bytearray] = None, + source_if_none_match_crc64: Optional[bytearray] = None, + **kwargs + ): + super(SourceModifiedAccessConditions, self).__init__(**kwargs) + self.source_if_match_crc64 = source_if_match_crc64 + self.source_if_none_match_crc64 = source_if_none_match_crc64 + + +class StorageError(msrest.serialization.Model): + """StorageError. + + :param message: + :type message: str + """ + + _attribute_map = { + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__( + self, + *, + message: Optional[str] = None, + **kwargs + ): + super(StorageError, self).__init__(**kwargs) + self.message = message + + +class StorageServiceProperties(msrest.serialization.Model): + """Storage service properties. + + :param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for + files. + :type hour_metrics: ~azure.storage.fileshare.models.Metrics + :param minute_metrics: A summary of request statistics grouped by API in minute aggregates for + files. + :type minute_metrics: ~azure.storage.fileshare.models.Metrics + :param cors: The set of CORS rules. + :type cors: list[~azure.storage.fileshare.models.CorsRule] + :param protocol: Protocol settings. + :type protocol: ~azure.storage.fileshare.models.ShareProtocolSettings + """ + + _attribute_map = { + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, + 'protocol': {'key': 'Protocol', 'type': 'ShareProtocolSettings'}, + } + + def __init__( + self, + *, + hour_metrics: Optional["Metrics"] = None, + minute_metrics: Optional["Metrics"] = None, + cors: Optional[List["CorsRule"]] = None, + protocol: Optional["ShareProtocolSettings"] = None, + **kwargs + ): + super(StorageServiceProperties, self).__init__(**kwargs) + self.hour_metrics = hour_metrics + self.minute_metrics = minute_metrics + self.cors = cors + self.protocol = protocol diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/__init__.py new file mode 100644 index 0000000..ba8fb22 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/__init__.py @@ -0,0 +1,19 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._share_operations import ShareOperations +from ._directory_operations import DirectoryOperations +from ._file_operations import FileOperations + +__all__ = [ + 'ServiceOperations', + 'ShareOperations', + 'DirectoryOperations', + 'FileOperations', +] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_directory_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_directory_operations.py new file mode 100644 index 0000000..8b241b5 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_directory_operations.py @@ -0,0 +1,762 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class DirectoryOperations(object): + """DirectoryOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.fileshare.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + file_permission="inherit", # type: Optional[str] + file_permission_key=None, # type: Optional[str] + file_attributes="none", # type: str + file_creation_time="now", # type: str + file_last_write_time="now", # type: str + **kwargs # type: Any + ): + # type: (...) -> None + """Creates a new directory under the specified share or parent directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. 
+ :type metadata: str + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + :type file_last_write_time: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + def get_properties( + self, + sharesnapshot=None, # type: Optional[str] + timeout=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Returns all system properties for the specified directory, and can also be used to check the + existence of a directory. The data returned does not include the files in the directory or any + subdirectories. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. 
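+         For example, ``timeout=30`` asks the service to fail the request if it
+         cannot be completed within 30 seconds.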
+ :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{shareName}/{directory}'} # 
type: ignore + + def delete( + self, + timeout=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> None + """Removes the specified empty directory. Note that the directory must be empty before it can be + deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + def set_properties( + self, + timeout=None, # type: Optional[int] + file_permission="inherit", # type: Optional[str] + file_permission_key=None, # type: Optional[str] + file_attributes="none", # type: str + file_creation_time="now", # type: str + file_last_write_time="now", # type: str + **kwargs # type: Any + ): + # type: (...) -> None + """Sets properties on the directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. 
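+         For example, a minimal SDDL value carrying all three required parts is
+         "O:SYG:SYD:(A;;FA;;;SY)" (owner SYSTEM, group SYSTEM, and a DACL with
+         one ACE granting SYSTEM full access).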
Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + :type file_last_write_time: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.set_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
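+        # The x-ms-file-* headers below echo the SMB properties the service
+        # actually applied, with any "inherit"/"now" request defaults resolved
+        # to concrete values.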
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + def set_metadata( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """Updates user defined metadata for the specified directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. 
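+         Metadata names must adhere to the naming rules for C# identifiers.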
+ :type metadata: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + def list_files_and_directories_segment( + self, + prefix=None, # type: Optional[str] + sharesnapshot=None, # type: Optional[str] + marker=None, # type: Optional[str] + maxresults=None, # type: Optional[int] + timeout=None, # type: Optional[int] + include=None, # type: Optional[List[Union[str, "_models.ListFilesIncludeType"]]] + include_extended_info=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> "_models.ListFilesAndDirectoriesSegmentResponse" + """Returns a list of files or directories under the specified share or directory. It lists the + contents only for a single level of the directory hierarchy. + + :param prefix: Filters the results to return only entries whose name begins with the specified + prefix. + :type prefix: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. 
+ :type sharesnapshot: str + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param include: Include this parameter to specify one or more datasets to include in the + response. + :type include: list[str or ~azure.storage.fileshare.models.ListFilesIncludeType] + :param include_extended_info: + :type include_extended_info: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListFilesAndDirectoriesSegmentResponse, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListFilesAndDirectoriesSegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "directory" + comp = "list" + accept = "application/xml" + + # Construct URL + url = self.list_files_and_directories_segment.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if prefix is not None: + query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if include is not None: + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if include_extended_info is not None: + header_parameters['x-ms-file-extended-info'] = self._serialize.header("include_extended_info", include_extended_info, 'bool') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if 
response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListFilesAndDirectoriesSegmentResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_files_and_directories_segment.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + def list_handles( + self, + marker=None, # type: Optional[str] + maxresults=None, # type: Optional[int] + timeout=None, # type: Optional[int] + sharesnapshot=None, # type: Optional[str] + recursive=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> "_models.ListHandlesResponse" + """Lists handles for directory. + + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param recursive: Specifies operation should apply to the directory specified in the URI, its + files, its subdirectories and their files. 
+ :type recursive: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListHandlesResponse, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListHandlesResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "listhandles" + accept = "application/xml" + + # Construct URL + url = self.list_handles.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if recursive is not None: + header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListHandlesResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore + + def force_close_handles( + self, + handle_id, # type: str + timeout=None, # type: Optional[int] + marker=None, # type: Optional[str] + sharesnapshot=None, # type: Optional[str] + recursive=None, # type: Optional[bool] + **kwargs # type: Any + ): + # type: (...) -> None + """Closes all handles open for given directory. + + :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk + (‘*’) is a wildcard that specifies all handles. 
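+         For example, pass "*" together with recursive=True to close every open
+         handle under the directory tree.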
+ :type handle_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. + :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param recursive: Specifies operation should apply to the directory specified in the URI, its + files, its subdirectories and their files. + :type recursive: bool + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "forceclosehandles" + accept = "application/xml" + + # Construct URL + url = self.force_close_handles.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str') + if recursive is not None: + header_parameters['x-ms-recursive'] = self._serialize.header("recursive", recursive, 'bool') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker')) + 
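+        # x-ms-marker returns a continuation token when not every matching
+        # handle could be closed in one call; callers typically loop, passing
+        # the marker back in, until it comes back empty. The two counters below
+        # report how many handles this call closed or failed to close.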
response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed')) + response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed')) + + if cls: + return cls(pipeline_response, None, response_headers) + + force_close_handles.metadata = {'url': '/{shareName}/{directory}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_file_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_file_operations.py new file mode 100644 index 0000000..577c303 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_file_operations.py @@ -0,0 +1,1797 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse + +from .. import models as _models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class FileOperations(object): + """FileOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.fileshare.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def create( + self, + file_content_length, # type: int + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + file_permission="inherit", # type: Optional[str] + file_permission_key=None, # type: Optional[str] + file_attributes="none", # type: str + file_creation_time="now", # type: str + file_last_write_time="now", # type: str + file_http_headers=None, # type: Optional["_models.FileHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Creates a new file or replaces a file. Note it only initializes the file with no content. + + :param file_content_length: Specifies the maximum size for the file, up to 4 TB. + :type file_content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. 
+ :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + :type file_last_write_time: str + :param file_http_headers: Parameter group. + :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _file_content_type = None + _file_content_encoding = None + _file_content_language = None + _file_cache_control = None + _file_content_md5 = None + _file_content_disposition = None + _lease_id = None + if file_http_headers is not None: + _file_content_type = file_http_headers.file_content_type + _file_content_encoding = file_http_headers.file_content_encoding + _file_content_language = file_http_headers.file_content_language + _file_cache_control = file_http_headers.file_cache_control + _file_content_md5 = file_http_headers.file_content_md5 + _file_content_disposition = file_http_headers.file_content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + file_type_constant = "file" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + header_parameters['x-ms-type'] = self._serialize.header("file_type_constant", file_type_constant, 'str') + if 
_file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') + if _file_content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') + if _file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') + if _file_cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') + if _file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') + if _file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + 
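+        # x-ms-file-change-time is the NTFS change time; x-ms-file-id and
+        # x-ms-file-parent-id are the service-assigned FileId values for the
+        # new file and its parent directory.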
response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def download( + self, + timeout=None, # type: Optional[int] + range=None, # type: Optional[str] + range_get_content_md5=None, # type: Optional[bool] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> IO + """Reads or downloads a file from the system, including its metadata and properties. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param range: Return file data only from the specified byte range. + :type range: str + :param range_get_content_md5: When this header is set to true and specified together with the + Range header, the service returns the MD5 hash for the range, as long as the range is less than + or equal to 4 MB in size. + :type range_get_content_md5: bool + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: IO, or the result of cls(response) + :rtype: IO + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[IO] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + accept = "application/xml" + + # Construct URL + url = self.download.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if range_get_content_md5 is not None: + header_parameters['x-ms-range-get-content-md5'] = self._serialize.header("range_get_content_md5", range_get_content_md5, 'bool') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + 
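+            # x-ms-lease-state / x-ms-lease-status complete the lease picture:
+            # state is available, leased, expired, breaking or broken, and
+            # status is locked or unlocked.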
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['Content-Range']=self._deserialize('str', response.headers.get('Content-Range')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Accept-Ranges']=self._deserialize('str', response.headers.get('Accept-Ranges')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-content-md5']=self._deserialize('bytearray', response.headers.get('x-ms-content-md5')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + 
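+            # Lease metadata on the 206 (Partial Content) response mirrors the
+            # 200 branch; only the requested byte range is streamed.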
response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + download.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def get_properties( + self, + sharesnapshot=None, # type: Optional[str] + timeout=None, # type: Optional[int] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Returns all user-defined metadata, standard HTTP properties, and system properties for the + file. It does not return the content of the file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.head(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', 
response.headers.get('Last-Modified')) + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['x-ms-type']=self._deserialize('str', response.headers.get('x-ms-type')) + response_headers['Content-Length']=self._deserialize('long', response.headers.get('Content-Length')) + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['Content-Encoding']=self._deserialize('str', response.headers.get('Content-Encoding')) + response_headers['Cache-Control']=self._deserialize('str', response.headers.get('Cache-Control')) + response_headers['Content-Disposition']=self._deserialize('str', response.headers.get('Content-Disposition')) + response_headers['Content-Language']=self._deserialize('str', response.headers.get('Content-Language')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-completion-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')) + response_headers['x-ms-copy-status-description']=self._deserialize('str', response.headers.get('x-ms-copy-status-description')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-progress']=self._deserialize('str', response.headers.get('x-ms-copy-progress')) + response_headers['x-ms-copy-source']=self._deserialize('str', response.headers.get('x-ms-copy-source')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + response_headers['x-ms-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-server-encrypted')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def delete( + self, + timeout=None, # type: Optional[int] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + 
**kwargs # type: Any + ): + # type: (...) -> None + """Removes the file from the storage account. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def set_http_headers( + self, + timeout=None, # type: Optional[int] + file_content_length=None, # type: Optional[int] + file_permission="inherit", # type: Optional[str] + file_permission_key=None, # type: Optional[str] + file_attributes="none", # type: str + file_creation_time="now", # type: str + file_last_write_time="now", # type: str + file_http_headers=None, # type: Optional["_models.FileHTTPHeaders"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Sets HTTP headers on the file. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. 
+ :type timeout: int + :param file_content_length: Resizes a file to the specified size. If the specified byte value + is less than the current size of the file, then all ranges above the specified byte value are + cleared. + :type file_content_length: long + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + :type file_last_write_time: str + :param file_http_headers: Parameter group. + :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _file_content_type = None + _file_content_encoding = None + _file_content_language = None + _file_cache_control = None + _file_content_md5 = None + _file_content_disposition = None + _lease_id = None + if file_http_headers is not None: + _file_content_type = file_http_headers.file_content_type + _file_content_encoding = file_http_headers.file_content_encoding + _file_content_language = file_http_headers.file_content_language + _file_cache_control = file_http_headers.file_cache_control + _file_content_md5 = file_http_headers.file_content_md5 + _file_content_disposition = file_http_headers.file_content_disposition + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.set_http_headers.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if 
file_content_length is not None: + header_parameters['x-ms-content-length'] = self._serialize.header("file_content_length", file_content_length, 'long') + if _file_content_type is not None: + header_parameters['x-ms-content-type'] = self._serialize.header("file_content_type", _file_content_type, 'str') + if _file_content_encoding is not None: + header_parameters['x-ms-content-encoding'] = self._serialize.header("file_content_encoding", _file_content_encoding, 'str') + if _file_content_language is not None: + header_parameters['x-ms-content-language'] = self._serialize.header("file_content_language", _file_content_language, 'str') + if _file_cache_control is not None: + header_parameters['x-ms-cache-control'] = self._serialize.header("file_cache_control", _file_cache_control, 'str') + if _file_content_md5 is not None: + header_parameters['x-ms-content-md5'] = self._serialize.header("file_content_md5", _file_content_md5, 'bytearray') + if _file_content_disposition is not None: + header_parameters['x-ms-content-disposition'] = self._serialize.header("file_content_disposition", _file_content_disposition, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", file_attributes, 'str') + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", file_creation_time, 'str') + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", file_last_write_time, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + response_headers['x-ms-file-attributes']=self._deserialize('str', response.headers.get('x-ms-file-attributes')) + response_headers['x-ms-file-creation-time']=self._deserialize('str', response.headers.get('x-ms-file-creation-time')) + response_headers['x-ms-file-last-write-time']=self._deserialize('str', 
response.headers.get('x-ms-file-last-write-time')) + response_headers['x-ms-file-change-time']=self._deserialize('str', response.headers.get('x-ms-file-change-time')) + response_headers['x-ms-file-id']=self._deserialize('str', response.headers.get('x-ms-file-id')) + response_headers['x-ms-file-parent-id']=self._deserialize('str', response.headers.get('x-ms-file-parent-id')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_http_headers.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def set_metadata( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Updates user-defined metadata for the specified file. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + 
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def acquire_lease( + self, + timeout=None, # type: Optional[int] + duration=None, # type: Optional[int] + proposed_lease_id=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "acquire" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = 
self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def release_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "release" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def change_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + proposed_lease_id=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. 
+ :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "change" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) 
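[Editor's note: acquire_lease, release_lease, change_lease and break_lease above are the raw REST wrappers; applications normally reach them through the hand-written clients that this multiapi package re-exports per API version. A minimal sketch, assuming the v2020_10_02 namespace mirrors azure.storage.fileshare and that the share and file already exist (conn_str and the paths are placeholders):

    from azure.multiapi.storagev2.fileshare.v2020_10_02 import ShareFileClient

    file_client = ShareFileClient.from_connection_string(
        conn_str, share_name="myshare", file_path="dir/data.bin")
    lease = file_client.acquire_lease()   # file leases never expire until released or broken
    try:
        # Writes against a leased file must present the lease id.
        file_client.upload_range(b"hello", offset=0, length=5, lease=lease)
    finally:
        lease.release()                   # drives the release_lease operation defined above

upload_range here is the public-API method; under the hood it invokes the generated upload_range operation shown later in this file, passing the lease id through LeaseAccessConditions.]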
+ + change_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def break_lease( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "lease" + action = "break" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + 
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def upload_range( + self, + range, # type: str + content_length, # type: int + timeout=None, # type: Optional[int] + file_range_write="update", # type: Union[str, "_models.FileRangeWriteType"] + content_md5=None, # type: Optional[bytearray] + optionalbody=None, # type: Optional[IO] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Upload a range of bytes to a file. + + :param range: Specifies the range of bytes to be written. Both the start and end of the range + must be specified. For an update operation, the range can be up to 4 MB in size. For a clear + operation, the range can be up to the value of the file's full size. The File service accepts + only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be + specified in the following format: bytes=startByte-endByte. + :type range: str + :param content_length: Specifies the number of bytes being transmitted in the request body. + When the x-ms-write header is set to clear, the value of this header must be set to zero. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param file_range_write: Specify one of the following options: - Update: Writes the bytes + specified by the request body into the specified range. The Range and Content-Length headers + must match to perform the update. - Clear: Clears the specified range and releases the space + used in storage for that range. To clear a range, set the Content-Length header to zero, and + set the Range header to a value that indicates the range to clear, up to maximum file size. + :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType + :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of + the data during transport. When the Content-MD5 header is specified, the File service compares + the hash of the content that has arrived with the header value that was sent. If the two hashes + do not match, the operation will fail with error code 400 (Bad Request). + :type content_md5: bytearray + :param optionalbody: Initial data. + :type optionalbody: IO + :param lease_access_conditions: Parameter group. 
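[Editor's note on lease_access_conditions, an illustrative assumption rather than generated text: constructing ``LeaseAccessConditions(lease_id="<current-lease-id>")`` scopes this write to the current lease holder; writing to a leased file without it fails with 412 (Precondition Failed).]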
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "range" + content_type = kwargs.pop("content_type", "application/octet-stream") + accept = "application/xml" + + # Construct URL + url = self.upload_range.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("file_range_write", file_range_write, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("content_md5", content_md5, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content_kwargs['stream_content'] = optionalbody + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['Content-MD5']=self._deserialize('bytearray', response.headers.get('Content-MD5')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, 
response_headers) + + upload_range.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def upload_range_from_url( + self, + range, # type: str + copy_source, # type: str + content_length, # type: int + timeout=None, # type: Optional[int] + source_range=None, # type: Optional[str] + source_content_crc64=None, # type: Optional[bytearray] + copy_source_authorization=None, # type: Optional[str] + source_modified_access_conditions=None, # type: Optional["_models.SourceModifiedAccessConditions"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Upload a range of bytes to a file where the contents are read from a URL. + + :param range: Writes data to the specified byte range in the file. + :type range: str + :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy + a file to another file within the same storage account, you may use Shared Key to authenticate + the source file. If you are copying a file from another storage account, or if you are copying + a blob from the same storage account or another storage account, then you must authenticate the + source file or blob using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a share snapshot can also + be specified as a copy source. + :type copy_source: str + :param content_length: Specifies the number of bytes being transmitted in the request body. + When the x-ms-write header is set to clear, the value of this header must be set to zero. + :type content_length: long + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param source_range: Bytes of source data in the specified range. + :type source_range: str + :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. + :type source_content_crc64: bytearray + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. + :type copy_source_authorization: str + :param source_modified_access_conditions: Parameter group. + :type source_modified_access_conditions: ~azure.storage.fileshare.models.SourceModifiedAccessConditions + :param lease_access_conditions: Parameter group. 
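[Editor's note, illustrative only: for an OAuth-protected source, the header form is copy_source_authorization="Bearer <oauth-access-token>"; for public or SAS-protected sources the authorization travels in copy_source itself and this parameter stays None.]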
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _source_if_match_crc64 = None + _source_if_none_match_crc64 = None + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if source_modified_access_conditions is not None: + _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 + _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 + comp = "range" + accept = "application/xml" + + # Construct URL + url = self.upload_range_from_url.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if source_range is not None: + header_parameters['x-ms-source-range'] = self._serialize.header("source_range", source_range, 'str') + header_parameters['x-ms-write'] = self._serialize.header("self._config.file_range_write_from_url", self._config.file_range_write_from_url, 'str') + header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long') + if source_content_crc64 is not None: + header_parameters['x-ms-source-content-crc64'] = self._serialize.header("source_content_crc64", source_content_crc64, 'bytearray') + if _source_if_match_crc64 is not None: + header_parameters['x-ms-source-if-match-crc64'] = self._serialize.header("source_if_match_crc64", _source_if_match_crc64, 'bytearray') + if _source_if_none_match_crc64 is not None: + header_parameters['x-ms-source-if-none-match-crc64'] = self._serialize.header("source_if_none_match_crc64", _source_if_none_match_crc64, 'bytearray') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if copy_source_authorization is not None: + header_parameters['x-ms-copy-source-authorization'] = self._serialize.header("copy_source_authorization", copy_source_authorization, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-content-crc64']=self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-request-server-encrypted']=self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_range_from_url.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def get_range_list( + self, + sharesnapshot=None, # type: Optional[str] + prevsharesnapshot=None, # type: Optional[str] + timeout=None, # type: Optional[int] + range=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> "_models.ShareFileRangeList" + """Returns the list of valid ranges for a file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that, + when present, specifies the previous snapshot. + :type prevsharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param range: Specifies the range of bytes over which to list ranges, inclusively. + :type range: str + :param lease_access_conditions: Parameter group. 
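[Editor's note, assumed behavior: when prevsharesnapshot is supplied, the returned ShareFileRangeList describes the delta between the two snapshots; its ``ranges`` are the byte ranges written and its ``clear_ranges`` the ranges cleared since prevsharesnapshot.]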
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ShareFileRangeList, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareFileRangeList + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareFileRangeList"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "rangelist" + accept = "application/xml" + + # Construct URL + url = self.get_range_list.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if prevsharesnapshot is not None: + query_parameters['prevsharesnapshot'] = self._serialize.query("prevsharesnapshot", prevsharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if range is not None: + header_parameters['x-ms-range'] = self._serialize.header("range", range, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['x-ms-content-length']=self._deserialize('long', response.headers.get('x-ms-content-length')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ShareFileRangeList', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_range_list.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def start_copy( + self, + copy_source, # type: str + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + 
file_permission="inherit", # type: Optional[str] + file_permission_key=None, # type: Optional[str] + copy_file_smb_info=None, # type: Optional["_models.CopyFileSmbInfo"] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Copies a blob or file to a destination file within the storage account. + + :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy + a file to another file within the same storage account, you may use Shared Key to authenticate + the source file. If you are copying a file from another storage account, or if you are copying + a blob from the same storage account or another storage account, then you must authenticate the + source file or blob using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a share snapshot can also + be specified as a copy source. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission_key: str + :param copy_file_smb_info: Parameter group. + :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _file_permission_copy_mode = None + _ignore_read_only = None + _file_attributes = None + _file_creation_time = None + _file_last_write_time = None + _set_archive_attribute = None + _lease_id = None + if copy_file_smb_info is not None: + _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode + _ignore_read_only = copy_file_smb_info.ignore_read_only + _file_attributes = copy_file_smb_info.file_attributes + _file_creation_time = copy_file_smb_info.file_creation_time + _file_last_write_time = copy_file_smb_info.file_last_write_time + _set_archive_attribute = copy_file_smb_info.set_archive_attribute + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + accept = "application/xml" + + # Construct URL + url = self.start_copy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-copy-source'] = self._serialize.header("copy_source", copy_source, 'str') + if file_permission is not None: + header_parameters['x-ms-file-permission'] = self._serialize.header("file_permission", file_permission, 'str') + if file_permission_key is not None: + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + if _file_permission_copy_mode is not None: + header_parameters['x-ms-file-permission-copy-mode'] = self._serialize.header("file_permission_copy_mode", _file_permission_copy_mode, 'str') + if _ignore_read_only is not None: + header_parameters['x-ms-file-copy-ignore-read-only'] = self._serialize.header("ignore_read_only", _ignore_read_only, 'bool') + if _file_attributes is not None: + header_parameters['x-ms-file-attributes'] = self._serialize.header("file_attributes", _file_attributes, 'str') + if _file_creation_time is not None: + header_parameters['x-ms-file-creation-time'] = self._serialize.header("file_creation_time", _file_creation_time, 'str') + if _file_last_write_time is not None: + header_parameters['x-ms-file-last-write-time'] = self._serialize.header("file_last_write_time", _file_last_write_time, 'str') + if _set_archive_attribute is not None: + header_parameters['x-ms-file-copy-set-archive'] = self._serialize.header("set_archive_attribute", _set_archive_attribute, 'bool') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + 
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-copy-id']=self._deserialize('str', response.headers.get('x-ms-copy-id')) + response_headers['x-ms-copy-status']=self._deserialize('str', response.headers.get('x-ms-copy-status')) + + if cls: + return cls(pipeline_response, None, response_headers) + + start_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def abort_copy( + self, + copy_id, # type: str + timeout=None, # type: Optional[int] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Aborts a pending Copy File operation, and leaves a destination file with zero length and full + metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy + File operation. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. 
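[Editor's note, assumed semantics: copy_id is the value returned in the x-ms-copy-id header by start_copy above, and only a still-pending copy can be aborted; aborting a completed copy is expected to fail with 409 (Conflict).]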
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "copy" + copy_action_abort_constant = "abort" + accept = "application/xml" + + # Construct URL + url = self.abort_copy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['copyid'] = self._serialize.query("copy_id", copy_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-copy-action'] = self._serialize.header("copy_action_abort_constant", copy_action_abort_constant, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + abort_copy.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def list_handles( + self, + marker=None, # type: Optional[str] + maxresults=None, # type: Optional[int] + timeout=None, # type: Optional[int] + sharesnapshot=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> "_models.ListHandlesResponse" + """Lists handles for file. + + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. 
If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListHandlesResponse, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListHandlesResponse + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHandlesResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "listhandles" + accept = "application/xml" + + # Construct URL + url = self.list_handles.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListHandlesResponse', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + list_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'} # type: ignore + + def force_close_handles( + self, + handle_id, # type: str + timeout=None, # type: Optional[int] + marker=None, # type: Optional[str] + sharesnapshot=None, # type: Optional[str] + **kwargs # type: Any 
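[Editor's note, restating the docstring below rather than adding new behavior: handle_id="*" is a wildcard that closes every open handle on the file. When more handles remain than one response can report, the x-ms-marker response header comes back non-empty, and the call is repeated with marker set to that value until the header is empty.]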
+    ):
+        # type: (...) -> None
+        """Closes all handles open for the given file.
+
+        :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk
+         (‘*’) is a wildcard that specifies all handles.
+        :type handle_id: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`.
+        :type timeout: int
+        :param marker: A string value that identifies the portion of the list to be returned with the
+         next list operation. The operation returns a marker value within the response body if the list
+         returned was not complete. The marker value may then be used in a subsequent call to request
+         the next set of list items. The marker value is opaque to the client.
+        :type marker: str
+        :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
+         specifies the share snapshot to query.
+        :type sharesnapshot: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None, or the result of cls(response)
+        :rtype: None
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        comp = "forceclosehandles"
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.force_close_handles.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+        if marker is not None:
+            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+        if sharesnapshot is not None:
+            query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['x-ms-handle-id'] = self._serialize.header("handle_id", handle_id, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+        response_headers['x-ms-marker']=self._deserialize('str', response.headers.get('x-ms-marker'))
+        response_headers['x-ms-number-of-handles-closed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-closed'))
+        response_headers['x-ms-number-of-handles-failed']=self._deserialize('int', response.headers.get('x-ms-number-of-handles-failed'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    force_close_handles.metadata = {'url': '/{shareName}/{directory}/{fileName}'}  # type: ignore
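The two handle operations above are normally reached through the public ``ShareFileClient``
(``list_handles``/``close_all_handles``), which drives the marker loop and the
``x-ms-handle-id: *`` wildcard for you. A minimal sketch, not part of this diff; the
connection-string variable, share name, and file path are placeholders::

    import os
    from azure.multiapi.storagev2.fileshare.v2020_10_02 import ShareFileClient

    # Client for one file on a share; it calls the generated operations shown here.
    file_client = ShareFileClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # placeholder env var
        share_name="myshare", file_path="dir/file.txt")

    for handle in file_client.list_handles():  # pages via the marker internally
        print(handle.id)
    file_client.close_all_handles()  # comp=forceclosehandles with handle ID '*'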
diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_service_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_service_operations.py
new file mode 100644
index 0000000..aaf67d3
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_service_operations.py
@@ -0,0 +1,276 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+
+from .. import models as _models
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
+
+    T = TypeVar('T')
+    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class ServiceOperations(object):
+    """ServiceOperations operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~azure.storage.fileshare.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = _models
+
+    def __init__(self, client, config, serializer, deserializer):
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def set_properties(
+        self,
+        storage_service_properties,  # type: "_models.StorageServiceProperties"
+        timeout=None,  # type: Optional[int]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """Sets properties for a storage account's File service endpoint, including properties for Storage
+        Analytics metrics and CORS (Cross-Origin Resource Sharing) rules.
+
+        :param storage_service_properties: The StorageService properties.
+        :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`.
+ :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "properties" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {'url': '/'} # type: ignore + + def get_properties( + self, + timeout=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.StorageServiceProperties" + """Gets the properties of a storage account's File service, including properties for Storage + Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. 
+ :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceProperties, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.StorageServiceProperties + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + deserialized = self._deserialize('StorageServiceProperties', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_properties.metadata = {'url': '/'} # type: ignore + + def list_shares_segment( + self, + prefix=None, # type: Optional[str] + marker=None, # type: Optional[str] + maxresults=None, # type: Optional[int] + include=None, # type: Optional[List[Union[str, "_models.ListSharesIncludeType"]]] + timeout=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.ListSharesResponse" + """The List Shares Segment operation returns a list of the shares and share snapshots under the + specified account. + + :param prefix: Filters the results to return only entries whose name begins with the specified + prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. 
If the request does not
+         specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000
+         items.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response.
+        :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType]
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`.
+        :type timeout: int
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: ListSharesResponse, or the result of cls(response)
+        :rtype: ~azure.storage.fileshare.models.ListSharesResponse
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListSharesResponse"]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        comp = "list"
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.list_shares_segment.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+        if prefix is not None:
+            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
+        if marker is not None:
+            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
+        if maxresults is not None:
+            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
+        if include is not None:
+            query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        deserialized = self._deserialize('ListSharesResponse', pipeline_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, response_headers)
+
+        return deserialized
+    list_shares_segment.metadata = {'url': '/'}  # type: ignore
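``list_shares_segment`` is the raw single-page call: the service returns at most ``maxresults``
shares plus a continuation marker that must be passed back in as ``marker`` until it comes back
empty. The public ``ShareServiceClient.list_shares`` pager hides that loop. A minimal sketch,
not part of this diff; the connection-string variable is a placeholder::

    import os
    from azure.multiapi.storagev2.fileshare.v2020_10_02 import ShareServiceClient

    service = ShareServiceClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"])  # placeholder env var

    # The pager re-issues list_shares_segment with the returned NextMarker
    # as `marker` until the listing is complete.
    for share in service.list_shares(include_metadata=True):
        print(share.name, share.quota)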
diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_share_operations.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_share_operations.py
new file mode 100644
index 0000000..02a94bf
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_generated/operations/_share_operations.py
@@ -0,0 +1,1506 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+
+from .. import models as _models
+
+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
+
+    T = TypeVar('T')
+    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class ShareOperations(object):
+    """ShareOperations operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~azure.storage.fileshare.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = _models
+
+    def __init__(self, client, config, serializer, deserializer):
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def create(
+        self,
+        timeout=None,  # type: Optional[int]
+        metadata=None,  # type: Optional[str]
+        quota=None,  # type: Optional[int]
+        access_tier=None,  # type: Optional[Union[str, "_models.ShareAccessTier"]]
+        enabled_protocols=None,  # type: Optional[str]
+        root_squash=None,  # type: Optional[Union[str, "_models.ShareRootSquash"]]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """Creates a new share under the specified account. If the share with the same name already
+        exists, the operation fails.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage object.
+        :type metadata: str
+        :param quota: Specifies the maximum size of the share, in gigabytes.
+        :type quota: int
+        :param access_tier: Specifies the access tier of the share.
+        :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
+        :param enabled_protocols: Protocols to enable on the share.
+        :type enabled_protocols: str
+        :param root_squash: Root squash to set on the share. Only valid for NFS shares.
+ :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.create.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + if quota is not None: + header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) + if access_tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if enabled_protocols is not None: + header_parameters['x-ms-enabled-protocols'] = self._serialize.header("enabled_protocols", enabled_protocols, 'str') + if root_squash is not None: + header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{shareName}'} # type: ignore + + def get_properties( + self, + sharesnapshot=None, # type: Optional[str] + timeout=None, # type: Optional[int] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Returns all user-defined metadata and system properties for the specified share or share + snapshot. The data returned does not include the share's list of files. 
+ + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.get_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta')) + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-share-quota']=self._deserialize('int', response.headers.get('x-ms-share-quota')) + response_headers['x-ms-share-provisioned-iops']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops')) + response_headers['x-ms-share-provisioned-ingress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps')) + 
response_headers['x-ms-share-provisioned-egress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps')) + response_headers['x-ms-share-next-allowed-quota-downgrade-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time')) + response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration')) + response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state')) + response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status')) + response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier')) + response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')) + response_headers['x-ms-access-tier-transition-state']=self._deserialize('str', response.headers.get('x-ms-access-tier-transition-state')) + response_headers['x-ms-enabled-protocols']=self._deserialize('str', response.headers.get('x-ms-enabled-protocols')) + response_headers['x-ms-root-squash']=self._deserialize('str', response.headers.get('x-ms-root-squash')) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {'url': '/{shareName}'} # type: ignore + + def delete( + self, + sharesnapshot=None, # type: Optional[str] + timeout=None, # type: Optional[int] + delete_snapshots=None, # type: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Operation marks the specified share or share snapshot for deletion. The share or share snapshot + and any files contained within it are later deleted during garbage collection. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param delete_snapshots: Specifies the option include to delete the base share and all of its + snapshots. + :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.delete.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{shareName}'} # type: ignore + + def acquire_lease( + self, + timeout=None, # type: Optional[int] + duration=None, # type: Optional[int] + proposed_lease_id=None, # type: Optional[str] + sharesnapshot=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. 
A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "acquire" + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.acquire_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if duration is not None: + header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + 
response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {'url': '/{shareName}'} # type: ignore + + def release_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + sharesnapshot=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "release" + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.release_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = 
self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {'url': '/{shareName}'} # type: ignore + + def change_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + proposed_lease_id=None, # type: Optional[str] + sharesnapshot=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. + :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "change" + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.change_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if proposed_lease_id is not None: + header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {'url': '/{shareName}'} # type: ignore + + def renew_lease( + self, + lease_id, # type: str + timeout=None, # type: Optional[int] + sharesnapshot=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + 
**kwargs # type: Any + ): + # type: (...) -> None + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + comp = "lease" + action = "renew" + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.renew_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', 
response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {'url': '/{shareName}'} # type: ignore + + def break_lease( + self, + timeout=None, # type: Optional[int] + break_period=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + sharesnapshot=None, # type: Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease + breaks immediately. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. + :type sharesnapshot: str + :param lease_access_conditions: Parameter group. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + comp = "lease" + action = "break" + restype = "share" + accept = "application/xml" + + # Construct URL + url = self.break_lease.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if sharesnapshot is not None: + query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str') + if break_period is not None: + header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time')) + response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return 
cls(pipeline_response, None, response_headers)
+
+    break_lease.metadata = {'url': '/{shareName}'}  # type: ignore
+
+    def create_snapshot(
+        self,
+        timeout=None,  # type: Optional[int]
+        metadata=None,  # type: Optional[str]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        """Creates a read-only snapshot of a share.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`<a
+         href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
+         Timeouts for File Service Operations.</a>`.
+        :type timeout: int
+        :param metadata: A name-value pair to associate with a file storage object.
+        :type metadata: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None, or the result of cls(response)
+        :rtype: None
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        restype = "share"
+        comp = "snapshot"
+        accept = "application/xml"
+
+        # Construct URL
+        url = self.create_snapshot.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        if metadata is not None:
+            header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
+        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.put(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot'))
+        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
+        response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
+        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
+        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
+        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    create_snapshot.metadata = {'url': '/{shareName}'}  # type: ignore
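The ``x-ms-snapshot`` header above is the opaque DateTime that names the new snapshot; the
public ``ShareClient`` surfaces it as the ``snapshot`` key of the returned dict and accepts it
back to address the read-only view. A minimal sketch, not part of this diff; the
connection-string variable and share name are placeholders::

    import os
    from azure.multiapi.storagev2.fileshare.v2020_10_02 import ShareClient

    conn_str = os.environ["AZURE_STORAGE_CONNECTION_STRING"]  # placeholder env var
    share = ShareClient.from_connection_string(conn_str, share_name="myshare")

    snap = share.create_snapshot()  # PUT ?restype=share&comp=snapshot
    # 'snapshot' carries the x-ms-snapshot header value returned above.
    snapshot_view = ShareClient.from_connection_string(
        conn_str, share_name="myshare", snapshot=snap["snapshot"])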
+ + :param share_permission: A permission (a security descriptor) at the share level. + :type share_permission: ~azure.storage.fileshare.models.SharePermission + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "share" + comp = "filepermission" + content_type = kwargs.pop("content_type", "application/json") + accept = "application/xml" + + # Construct URL + url = self.create_permission.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(share_permission, 'SharePermission') + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key')) + + if cls: + return cls(pipeline_response, None, response_headers) + + create_permission.metadata = {'url': '/{shareName}'} # type: ignore + + def get_permission( + self, + file_permission_key, # type: str + timeout=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> "_models.SharePermission" + """Returns the permission (security descriptor) for a given key. + + :param file_permission_key: Key of the permission to be set for the directory/file. + :type file_permission_key: str + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SharePermission, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.SharePermission + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.SharePermission"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "share" + comp = "filepermission" + accept = "application/json" + + # Construct URL + url = self.get_permission.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('SharePermission', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_permission.metadata = {'url': '/{shareName}'} # type: ignore + + def set_properties( + self, + timeout=None, # type: Optional[int] + quota=None, # type: Optional[int] + access_tier=None, # type: Optional[Union[str, "_models.ShareAccessTier"]] + root_squash=None, # type: Optional[Union[str, "_models.ShareRootSquash"]] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Sets properties for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param quota: Specifies the maximum size of the share, in gigabytes. + :type quota: int + :param access_tier: Specifies the access tier of the share. 
+ :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + :param root_squash: Root squash to set on the share. Only valid for NFS shares. + :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + comp = "properties" + accept = "application/xml" + + # Construct URL + url = self.set_properties.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if quota is not None: + header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1) + if access_tier is not None: + header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + if root_squash is not None: + header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {'url': '/{shareName}'} # type: ignore + + def set_metadata( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: 
Optional[str] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Sets one or more user-defined name-value pairs for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. + :type metadata: str + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + comp = "metadata" + accept = "application/xml" + + # Construct URL + url = self.set_metadata.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + if metadata is not None: + header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {'url': '/{shareName}'} # type: ignore + + def get_access_policy( + self, + timeout=None, # type: Optional[int] + lease_access_conditions=None, # 
type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> List["_models.SignedIdentifier"] + """Returns information about stored access policies specified on the share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of SignedIdentifier, or the result of cls(response) + :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + comp = "acl" + accept = "application/xml" + + # Construct URL + url = self.get_access_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('[SignedIdentifier]', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + get_access_policy.metadata = {'url': '/{shareName}'} # type: ignore + + def set_access_policy( + self, + timeout=None, # type: Optional[int] + share_acl=None, # type: 
Optional[List["_models.SignedIdentifier"]] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> None + """Sets a stored access policy for use with shared access signatures. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param share_acl: The ACL for the share. + :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier] + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) + :rtype: None + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + comp = "acl" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + + # Construct URL + url = self.set_access_policy.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True}} + if share_acl is not None: + body_content = self._serialize.body(share_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt) + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + 
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_policy.metadata = {'url': '/{shareName}'} # type: ignore + + def get_statistics( + self, + timeout=None, # type: Optional[int] + lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"] + **kwargs # type: Any + ): + # type: (...) -> "_models.ShareStats" + """Retrieves statistics related to the share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. + :type timeout: int + :param lease_access_conditions: Parameter group. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ShareStats, or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareStats + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareStats"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + restype = "share" + comp = "stats" + accept = "application/xml" + + # Construct URL + url = self.get_statistics.metadata['url'] # type: ignore + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if _lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + 
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
+ deserialized = self._deserialize('ShareStats', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, response_headers)
+
+ return deserialized
+ get_statistics.metadata = {'url': '/{shareName}'} # type: ignore
+
+ def restore(
+ self,
+ timeout=None, # type: Optional[int]
+ request_id_parameter=None, # type: Optional[str]
+ deleted_share_name=None, # type: Optional[str]
+ deleted_share_version=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """Restores a previously deleted Share.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ :code:`Setting
+ Timeouts for File Service Operations.`.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled.
+ :type request_id_parameter: str
+ :param deleted_share_name: Specifies the name of the previously-deleted share.
+ :type deleted_share_name: str
+ :param deleted_share_version: Specifies the version of the previously-deleted share.
+ :type deleted_share_version: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: None, or the result of cls(response)
+ :rtype: None
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ restype = "share"
+ comp = "undelete"
+ accept = "application/xml"
+
+ # Construct URL
+ url = self.restore.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
+ query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
+ if timeout is not None:
+ query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
+ if request_id_parameter is not None:
+ header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
+ if deleted_share_name is not None:
+ header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str')
+ if deleted_share_version is not None:
+ header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.put(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [201]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
+ raise
HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['ETag']=self._deserialize('str', response.headers.get('ETag')) + response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified')) + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if cls: + return cls(pipeline_response, None, response_headers) + + restore.metadata = {'url': '/{shareName}'} # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_lease.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_lease.py new file mode 100644 index 0000000..7c38145 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_lease.py @@ -0,0 +1,237 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import uuid + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, TypeVar, TYPE_CHECKING +) + +from azure.core.tracing.decorator import distributed_trace +from azure.core.exceptions import HttpResponseError + +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._generated.operations import FileOperations, ShareOperations + +if TYPE_CHECKING: + from datetime import datetime + ShareFileClient = TypeVar("ShareFileClient") + ShareClient = TypeVar("ShareClient") + + +class ShareLeaseClient(object): + """Creates a new ShareLeaseClient. + + This client provides lease operations on a ShareClient or ShareFileClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the file or share to lease. + :type client: ~azure.storage.fileshare.ShareFileClient or + ~azure.storage.fileshare.ShareClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. 
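+
+ Example (a minimal usage sketch; ``share_client`` is assumed to be an
+ existing ``ShareClient``, and the 60-second duration is illustrative)::
+
+ lease = ShareLeaseClient(share_client)
+ lease.acquire(lease_duration=60)
+ # ... operate on the share while the lease is held ...
+ lease.release()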
+ """ + def __init__( + self, client, lease_id=None + ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs + # type: (Union[ShareFileClient, ShareClient], Optional[str]) -> None + self.id = lease_id or str(uuid.uuid4()) + self.last_modified = None + self.etag = None + if hasattr(client, 'file_name'): + self._client = client._client.file # type: ignore # pylint: disable=protected-access + self._snapshot = None + elif hasattr(client, 'share_name'): + self._client = client._client.share + self._snapshot = client.snapshot + else: + raise TypeError("Lease must use ShareFileClient or ShareClient.") + + def __enter__(self): + return self + + def __exit__(self, *args): + self.release() + + @distributed_trace + def acquire(self, **kwargs): + # type: (**Any) -> None + """Requests a new lease. This operation establishes and manages a lock on a + file or share for write and delete operations. If the file or share does not have an active lease, + the File or Share service creates a lease on the file or share. If the file has an active lease, + you can only request a new lease using the active lease ID. + + + If the file or share does not have an active lease, the File or Share service creates a + lease on the file and returns a new lease ID. + + :keyword int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be + between 15 and 60 seconds. A share lease duration cannot be changed + using renew or change. Default is -1 (infinite share lease). + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + try: + lease_duration = kwargs.pop('lease_duration', -1) + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace + def renew(self, **kwargs): + # type: (Any) -> None + """Renews the share lease. + + The share lease can be renewed if the lease ID specified in the + lease client matches that associated with the share. Note that + the lease may be renewed even if it has expired as long as the share + has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + .. versionadded:: 12.6.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + if isinstance(self._client, FileOperations): + raise TypeError("Lease renewal operations are only valid for ShareClient.") + try: + response = self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + sharesnapshot=self._snapshot, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def release(self, **kwargs): + # type: (Any) -> None + """Releases the lease. 
The lease may be released if the lease ID specified on the request matches
+ that associated with the share or file. Releasing the lease allows another client to immediately acquire
+ the lease for the share or file as soon as the release is complete.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ try:
+ if self._snapshot:
+ kwargs['sharesnapshot'] = self._snapshot
+ response = self._client.release_lease(
+ lease_id=self.id,
+ timeout=kwargs.pop('timeout', None),
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace
+ def change(self, proposed_lease_id, **kwargs):
+ # type: (str, Any) -> None
+ """Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and
+ a new lease ID in x-ms-proposed-lease-id.
+
+ :param str proposed_lease_id:
+ Proposed lease ID, in a GUID string format. The File or Share service will raise an error
+ (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: None
+ """
+ try:
+ if self._snapshot:
+ kwargs['sharesnapshot'] = self._snapshot
+ response = self._client.change_lease(
+ lease_id=self.id,
+ proposed_lease_id=proposed_lease_id,
+ timeout=kwargs.pop('timeout', None),
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace
+ def break_lease(self, **kwargs):
+ # type: (Any) -> int
+ """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease;
+ the request is not required to specify a matching lease ID. An infinite lease breaks immediately.
+
+ Once a lease is broken, it cannot be changed.
+ When a lease is successfully broken, the response indicates the interval
+ in seconds until a new lease can be acquired.
+
+ :keyword int lease_break_period:
+ This is the proposed duration of seconds that the share lease
+ should continue before it is broken, between 0 and 60 seconds. This
+ break period is only used if it is shorter than the time remaining
+ on the share lease. If longer, the time remaining on the share lease is used.
+ A new share lease will not be available before the break period has
+ expired, but the share lease may be held for longer than the break
+ period. If this header does not appear with a break
+ operation, a fixed-duration share lease breaks after the remaining share lease
+ period elapses, and an infinite share lease breaks immediately.
+
+ .. versionadded:: 12.6.0
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: Approximate time remaining in the lease period, in seconds.
+ :rtype: int
+ """
+ try:
+ lease_break_period = kwargs.pop('lease_break_period', None)
+ if self._snapshot:
+ kwargs['sharesnapshot'] = self._snapshot
+ if isinstance(self._client, ShareOperations):
+ kwargs['break_period'] = lease_break_period
+ if isinstance(self._client, FileOperations) and lease_break_period:
+ raise TypeError("Setting a lease break period is only applicable to Share leases.")
+
+ response = self._client.break_lease(
+ timeout=kwargs.pop('timeout', None),
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ return response.get('lease_time') # type: ignore
diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_models.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_models.py
new file mode 100644
index 0000000..0f7a2fa
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_models.py
@@ -0,0 +1,1011 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+
+from enum import Enum
+
+from azure.core.paging import PageIterator
+from azure.core.exceptions import HttpResponseError
+from ._parser import _parse_datetime_from_str
+from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
+from ._shared.models import DictMixin, get_enum_value
+from ._generated.models import Metrics as GeneratedMetrics
+from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
+from ._generated.models import CorsRule as GeneratedCorsRule
+from ._generated.models import ShareProtocolSettings as GeneratedShareProtocolSettings
+from ._generated.models import ShareSmbSettings as GeneratedShareSmbSettings
+from ._generated.models import SmbMultichannel as GeneratedSmbMultichannel
+from ._generated.models import AccessPolicy as GenAccessPolicy
+from ._generated.models import DirectoryItem
+
+
+def _wrap_item(item):
+ if isinstance(item, DirectoryItem):
+ return {'name': item.name, 'is_directory': True}
+ return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False}
+
+
+class Metrics(GeneratedMetrics):
+ """A summary of request statistics grouped by API in hour or minute aggregates
+ for files.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :keyword str version: The version of Storage Analytics to configure.
+ :keyword bool enabled: Required. Indicates whether metrics are enabled for the
+ File service.
+ :keyword bool include_apis: Indicates whether metrics should generate summary
+ statistics for called API operations.
+ :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should
+ persist.
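+
+ Example (a minimal sketch; the five-day retention is illustrative)::
+
+ metrics = Metrics(enabled=True, include_apis=True,
+ retention_policy=RetentionPolicy(enabled=True, days=5))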
+ """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.enabled = kwargs.get('enabled', False) + self.include_apis = kwargs.get('include_apis') + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + enabled=generated.enabled, + include_apis=generated.include_apis, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class RetentionPolicy(GeneratedRetentionPolicy): + """The retention policy which determines how long the associated data should + persist. + + All required parameters must be populated in order to send to Azure. + + :param bool enabled: Required. Indicates whether a retention policy is enabled + for the storage service. + :param int days: Indicates the number of days that metrics or logging or + soft-deleted data should be retained. All data older than this value will + be deleted. + """ + + def __init__(self, enabled=False, days=None): + self.enabled = enabled + self.days = days + if self.enabled and (self.days is None): + raise ValueError("If policy is enabled, 'days' must be specified.") + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + days=generated.days, + ) + + +class CorsRule(GeneratedCorsRule): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. Web browsers implement a + security restriction known as same-origin policy that prevents a web page + from calling APIs in a different domain; CORS provides a secure way to + allow one domain (the origin domain) to call APIs in another domain. + + All required parameters must be populated in order to send to Azure. + + :param list(str) allowed_origins: + A list of origin domains that will be allowed via CORS, or "*" to allow + all domains. The list of must contain at least one entry. Limited to 64 + origin domains. Each allowed origin can have up to 256 characters. + :param list(str) allowed_methods: + A list of HTTP methods that are allowed to be executed by the origin. + The list of must contain at least one entry. For Azure Storage, + permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. + :keyword list(str) allowed_headers: + Defaults to an empty list. A list of headers allowed to be part of + the cross-origin request. Limited to 64 defined headers and 2 prefixed + headers. Each header can be up to 256 characters. + :keyword list(str) exposed_headers: + Defaults to an empty list. A list of response headers to expose to CORS + clients. Limited to 64 defined headers and two prefixed headers. Each + header can be up to 256 characters. + :keyword int max_age_in_seconds: + The number of seconds that the client/browser should cache a + preflight response. 
+ """ + + def __init__(self, allowed_origins, allowed_methods, **kwargs): + self.allowed_origins = ','.join(allowed_origins) + self.allowed_methods = ','.join(allowed_methods) + self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) + self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) + self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) + + @classmethod + def _from_generated(cls, generated): + return cls( + [generated.allowed_origins], + [generated.allowed_methods], + allowed_headers=[generated.allowed_headers], + exposed_headers=[generated.exposed_headers], + max_age_in_seconds=generated.max_age_in_seconds, + ) + + +class ShareSmbSettings(GeneratedShareSmbSettings): + """ Settings for the SMB protocol. + + :keyword SmbMultichannel multichannel: Sets the multichannel settings. + """ + def __init__(self, **kwargs): + self.multichannel = kwargs.get('multichannel') + if self.multichannel is None: + raise ValueError("The value 'multichannel' must be specified.") + + +class SmbMultichannel(GeneratedSmbMultichannel): + """ Settings for Multichannel. + + :keyword bool enabled: If SMB Multichannel is enabled. + """ + def __init__(self, **kwargs): + self.enabled = kwargs.get('enabled') + if self.enabled is None: + raise ValueError("The value 'enabled' must be specified.") + + +class ShareProtocolSettings(GeneratedShareProtocolSettings): + """Protocol Settings class used by the set and get service properties methods in the share service. + + Contains protocol properties of the share service such as the SMB setting of the share service. + + :keyword SmbSettings smb: Sets SMB settings. + """ + def __init__(self, **kwargs): + self.smb = kwargs.get('smb') + if self.smb is None: + raise ValueError("The value 'smb' must be specified.") + + @classmethod + def _from_generated(cls, generated): + return cls( + smb=generated.smb) + + +class AccessPolicy(GenAccessPolicy): + """Access Policy class used by the set and get acl methods in each service. + + A stored access policy can specify the start time, expiry time, and + permissions for the Shared Access Signatures with which it's associated. + Depending on how you want to control access to your resource, you can + specify all of these parameters within the stored access policy, and omit + them from the URL for the Shared Access Signature. Doing so permits you to + modify the associated signature's behavior at any time, as well as to revoke + it. Or you can specify one or more of the access policy parameters within + the stored access policy, and the others on the URL. Finally, you can + specify all of the parameters on the URL. In this case, you can use the + stored access policy to revoke the signature, but not to modify its behavior. + + Together the Shared Access Signature and the stored access policy must + include all fields required to authenticate the signature. If any required + fields are missing, the request will fail. Likewise, if a field is specified + both in the Shared Access Signature URL and in the stored access policy, the + request will fail with status code 400 (Bad Request). + + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. 
+ :type permission: str or ~azure.storage.fileshare.FileSasPermissions or + ~azure.storage.fileshare.ShareSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + """ + def __init__(self, permission=None, expiry=None, start=None): + self.start = start + self.expiry = expiry + self.permission = permission + + +class LeaseProperties(DictMixin): + """File or Share Lease Properties. + + :ivar str status: + The lease status of the file or share. Possible values: locked|unlocked + :ivar str state: + Lease state of the file or share. Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a file or share is leased, specifies whether the lease is of infinite or fixed duration. + """ + + def __init__(self, **kwargs): + self.status = get_enum_value(kwargs.get('x-ms-lease-status')) + self.state = get_enum_value(kwargs.get('x-ms-lease-state')) + self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) + + @classmethod + def _from_generated(cls, generated): + lease = cls() + lease.status = get_enum_value(generated.properties.lease_status) + lease.state = get_enum_value(generated.properties.lease_state) + lease.duration = get_enum_value(generated.properties.lease_duration) + return lease + + +class ContentSettings(DictMixin): + """Used to store the content settings of a file. + + :param str content_type: + The content type specified for the file. If no content type was + specified, the default content type is application/octet-stream. + :param str content_encoding: + If the content_encoding has previously been set + for the file, that value is stored. + :param str content_language: + If the content_language has previously been set + for the file, that value is stored. + :param str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the file, that value is stored. + :param str cache_control: + If the cache_control has previously been set for + the file, that value is stored. + :param bytearray content_md5: + If the content_md5 has been set for the file, this response + header is stored so that the client can check for message content + integrity. 
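+
+ Example (a minimal sketch; the header values are illustrative)::
+
+ settings = ContentSettings(content_type='text/html',
+ cache_control='no-cache')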
+ """ + + def __init__( + self, content_type=None, content_encoding=None, + content_language=None, content_disposition=None, + cache_control=None, content_md5=None, **kwargs): + + self.content_type = content_type or kwargs.get('Content-Type') + self.content_encoding = content_encoding or kwargs.get('Content-Encoding') + self.content_language = content_language or kwargs.get('Content-Language') + self.content_md5 = content_md5 or kwargs.get('Content-MD5') + self.content_disposition = content_disposition or kwargs.get('Content-Disposition') + self.cache_control = cache_control or kwargs.get('Cache-Control') + + @classmethod + def _from_generated(cls, generated): + settings = cls() + settings.content_type = generated.properties.content_type or None + settings.content_encoding = generated.properties.content_encoding or None + settings.content_language = generated.properties.content_language or None + settings.content_md5 = generated.properties.content_md5 or None + settings.content_disposition = generated.properties.content_disposition or None + settings.cache_control = generated.properties.cache_control or None + return settings + + +class ShareProperties(DictMixin): + """Share's properties class. + + :ivar str name: + The name of the share. + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the share was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar int quota: + The allocated quota. + :ivar str access_tier: + The share's access tier. + :ivar dict metadata: A dict with name_value pairs to associate with the + share as metadata. + :ivar str snapshot: + Snapshot of the share. + :ivar bool deleted: + To indicate if this share is deleted or not. + This is a service returned value, and the value will be set when list shared including deleted ones. + :ivar datetime deleted: + To indicate the deleted time of the deleted share. + This is a service returned value, and the value will be set when list shared including deleted ones. + :ivar str version: + To indicate the version of deleted share. + This is a service returned value, and the value will be set when list shared including deleted ones. + :ivar int remaining_retention_days: + To indicate how many remaining days the deleted share will be kept. + This is a service returned value, and the value will be set when list shared including deleted ones. + :ivar ~azure.storage.fileshare.models.ShareRootSquash or str root_squash: + Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. + :ivar list(str) protocols: + Indicates the protocols enabled on the share. The protocol can be either SMB or NFS. 
+ """ + + def __init__(self, **kwargs): + self.name = None + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.quota = kwargs.get('x-ms-share-quota') + self.access_tier = kwargs.get('x-ms-access-tier') + self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time') + self.metadata = kwargs.get('metadata') + self.snapshot = None + self.deleted = None + self.deleted_time = None + self.version = None + self.remaining_retention_days = None + self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps') + self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps') + self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops') + self.lease = LeaseProperties(**kwargs) + self.protocols = [protocol.strip() for protocol in kwargs.get('x-ms-enabled-protocols', None).split(',')]\ + if kwargs.get('x-ms-enabled-protocols', None) else None + self.root_squash = kwargs.get('x-ms-root-squash', None) + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.last_modified = generated.properties.last_modified + props.etag = generated.properties.etag + props.quota = generated.properties.quota + props.access_tier = generated.properties.access_tier + props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time + props.metadata = generated.metadata + props.snapshot = generated.snapshot + props.deleted = generated.deleted + props.deleted_time = generated.properties.deleted_time + props.version = generated.version + props.remaining_retention_days = generated.properties.remaining_retention_days + props.provisioned_egress_mbps = generated.properties.provisioned_egress_m_bps + props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_m_bps + props.provisioned_iops = generated.properties.provisioned_iops + props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + props.protocols = [protocol.strip() for protocol in generated.properties.enabled_protocols.split(',')]\ + if generated.properties.enabled_protocols else None + props.root_squash = generated.properties.root_squash + + return props + + +class SharePropertiesPaged(PageIterator): + """An iterable of Share properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A file name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.fileshare.ShareProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only shares whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of share names to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(SharePropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + prefix=self.prefix, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access + return self._response.next_marker or None, self.current_page + + +class Handle(DictMixin): + """A listed Azure Storage handle item. + + All required parameters must be populated in order to send to Azure. + + :keyword str handle_id: Required. XSMB service handle ID + :keyword str path: Required. File or directory name including full path starting + from share root + :keyword str file_id: Required. FileId uniquely identifies the file or + directory. + :keyword str parent_id: ParentId uniquely identifies the parent directory of the + object. + :keyword str session_id: Required. SMB session ID in context of which the file + handle was opened + :keyword str client_ip: Required. Client IP that opened the handle + :keyword ~datetime.datetime open_time: Required. Time when the session that previously opened + the handle has last been reconnected. (UTC) + :keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC) + """ + + def __init__(self, **kwargs): + self.id = kwargs.get('handle_id') + self.path = kwargs.get('path') + self.file_id = kwargs.get('file_id') + self.parent_id = kwargs.get('parent_id') + self.session_id = kwargs.get('session_id') + self.client_ip = kwargs.get('client_ip') + self.open_time = kwargs.get('open_time') + self.last_reconnect_time = kwargs.get('last_reconnect_time') + + @classmethod + def _from_generated(cls, generated): + handle = cls() + handle.id = generated.handle_id + handle.path = generated.path + handle.file_id = generated.file_id + handle.parent_id = generated.parent_id + handle.session_id = generated.session_id + handle.client_ip = generated.client_ip + handle.open_time = generated.open_time + handle.last_reconnect_time = generated.last_reconnect_time + return handle + + +class HandlesPaged(PageIterator): + """An iterable of Handles. + + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. 
+    :vartype current_page: list(~azure.storage.fileshare.Handle)
+
+    :param callable command: Function to retrieve the next page of items.
+    :param int results_per_page: The maximum number of handles to retrieve per
+        call.
+    :param str continuation_token: An opaque continuation token.
+    """
+    def __init__(self, command, results_per_page=None, continuation_token=None):
+        super(HandlesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    def _get_next_cb(self, continuation_token):
+        try:
+            return self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.current_page = [Handle._from_generated(h) for h in self._response.handle_list]  # pylint: disable=protected-access
+        return self._response.next_marker or None, self.current_page
+
+
+class DirectoryProperties(DictMixin):
+    """Directory's properties class.
+
+    :ivar str name:
+        The name of the directory.
+    :ivar ~datetime.datetime last_modified:
+        A datetime object representing the last time the directory was modified.
+    :ivar str etag:
+        The ETag contains a value that you can use to perform operations
+        conditionally.
+    :ivar bool server_encrypted:
+        Whether encryption is enabled.
+    :ivar dict metadata: A dict with name-value pairs to associate with the
+        directory as metadata.
+    :ivar change_time: Change time for the directory.
+    :vartype change_time: str or ~datetime.datetime
+    :ivar creation_time: Creation time for the directory.
+    :vartype creation_time: str or ~datetime.datetime
+    :ivar last_write_time: Last write time for the directory.
+    :vartype last_write_time: str or ~datetime.datetime
+    :ivar last_access_time: Last access time for the directory.
+    :vartype last_access_time: ~datetime.datetime
+    :ivar file_attributes:
+        The file system attributes for files and directories.
+    :vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
+    :ivar permission_key: Key of the permission to be set for the
+        directory/file.
+    :vartype permission_key: str
+    :ivar file_id: Required. FileId uniquely identifies the file or
+        directory.
+    :vartype file_id: str
+    :ivar parent_id: ParentId uniquely identifies the parent directory of the
+        object.
+    :vartype parent_id: str
+    """
+
+    def __init__(self, **kwargs):
+        self.name = None
+        self.last_modified = kwargs.get('Last-Modified')
+        self.etag = kwargs.get('ETag')
+        self.server_encrypted = kwargs.get('x-ms-server-encrypted')
+        self.metadata = kwargs.get('metadata')
+        self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time'))
+        self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time'))
+        self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time'))
+        self.last_access_time = None
+        self.file_attributes = kwargs.get('x-ms-file-attributes')
+        self.permission_key = kwargs.get('x-ms-file-permission-key')
+        self.file_id = kwargs.get('x-ms-file-id')
+        self.parent_id = kwargs.get('x-ms-file-parent-id')
+        self.is_directory = True
+
+    @classmethod
+    def _from_generated(cls, generated):
+        props = cls()
+        props.name = generated.name
+        props.file_id = generated.file_id
+        props.file_attributes = generated.attributes
+        props.last_modified = generated.properties.last_modified
+        props.creation_time = generated.properties.creation_time
+        props.last_access_time = generated.properties.last_access_time
+        props.last_write_time = generated.properties.last_write_time
+        props.change_time = generated.properties.change_time
+        props.etag = generated.properties.etag
+        props.permission_key = generated.permission_key
+        return props
+
+
+class DirectoryPropertiesPaged(PageIterator):
+    """An iterable for the contents of a directory.
+
+    This iterable will yield dicts for the contents of the directory. The dicts
+    will have the keys 'name' (str) and 'is_directory' (bool).
+    Items that are files (is_directory=False) will have an additional 'content_length' key.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A file name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results. The available
+        options include "primary" and "secondary".
+    :ivar current_page: The current page of listed results.
+    :vartype current_page: list(dict(str, Any))
+
+    :param callable command: Function to retrieve the next page of items.
+    :param str prefix: Filters the results to return only directories whose names
+        begin with the specified prefix.
+    :param int results_per_page: The maximum number of directory and file names
+        to retrieve per call.
+    :param str continuation_token: An opaque continuation token.
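+
+    A minimal usage sketch; this pager is normally obtained from
+    ``ShareDirectoryClient.list_directories_and_files`` rather than
+    constructed directly, and ``directory`` below is assumed to be an
+    already-authenticated client:
+
+    .. code-block:: python
+
+        for item in directory.list_directories_and_files():
+            kind = "dir" if item["is_directory"] else "file"
+            print(kind, item["name"])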
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(DirectoryPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + prefix=self.prefix, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [DirectoryProperties._from_generated(i) for i in self._response.segment.directory_items] # pylint: disable = protected-access + self.current_page.extend([FileProperties._from_generated(i) for i in self._response.segment.file_items]) # pylint: disable = protected-access + return self._response.next_marker or None, self.current_page + + +class FileProperties(DictMixin): + """File's properties class. + + :ivar str name: + The name of the file. + :ivar str path: + The path of the file. + :ivar str share: + The name of share. + :ivar str snapshot: + File snapshot. + :ivar int content_length: + Size of file in bytes. + :ivar dict metadata: A dict with name_value pairs to associate with the + file as metadata. + :ivar str file_type: + Type of the file. + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the file was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar int size: + Size of file in bytes. + :ivar str content_range: + The range of bytes. + :ivar bool server_encrypted: + Whether encryption is enabled. + :ivar copy: + The copy properties. + :vartype copy: ~azure.storage.fileshare.CopyProperties + :ivar content_settings: + The content settings for the file. 
+ :vartype content_settings: ~azure.storage.fileshare.ContentSettings + """ + + def __init__(self, **kwargs): + self.name = kwargs.get('name') + self.path = None + self.share = None + self.snapshot = None + self.content_length = kwargs.get('Content-Length') + self.metadata = kwargs.get('metadata') + self.file_type = kwargs.get('x-ms-type') + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.size = kwargs.get('Content-Length') + self.content_range = kwargs.get('Content-Range') + self.server_encrypted = kwargs.get('x-ms-server-encrypted') + self.copy = CopyProperties(**kwargs) + self.content_settings = ContentSettings(**kwargs) + self.lease = LeaseProperties(**kwargs) + self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) + self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) + self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) + self.last_access_time = None + self.file_attributes = kwargs.get('x-ms-file-attributes') + self.permission_key = kwargs.get('x-ms-file-permission-key') + self.file_id = kwargs.get('x-ms-file-id') + self.parent_id = kwargs.get('x-ms-file-parent-id') + self.is_directory = False + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.file_id = generated.file_id + props.etag = generated.properties.etag + props.file_attributes = generated.attributes + props.last_modified = generated.properties.last_modified + props.creation_time = generated.properties.creation_time + props.last_access_time = generated.properties.last_access_time + props.last_write_time = generated.properties.last_write_time + props.change_time = generated.properties.change_time + props.size = generated.properties.content_length + props.permission_key = generated.permission_key + return props + + +class ShareProtocols(str, Enum): + """Enabled protocols on the share""" + SMB = "SMB" + NFS = "NFS" + + +class CopyProperties(DictMixin): + """File Copy Properties. + + :ivar str id: + String identifier for the last attempted Copy File operation where this file + was the destination file. This header does not appear if this file has never + been the destination in a Copy File operation, or if this file has been + modified after a concluded Copy File operation. + :ivar str source: + URL up to 2 KB in length that specifies the source file used in the last attempted + Copy File operation where this file was the destination file. This header does not + appear if this file has never been the destination in a Copy File operation, or if + this file has been modified after a concluded Copy File operation. + :ivar str status: + State of the copy operation identified by Copy ID, with these values: + success: + Copy completed successfully. + pending: + Copy is in progress. Check copy_status_description if intermittent, + non-fatal errors impede copy progress but don't cause failure. + aborted: + Copy was ended by Abort Copy File. + failed: + Copy failed. See copy_status_description for failure details. + :ivar str progress: + Contains the number of bytes copied and the total bytes in the source in the last + attempted Copy File operation where this file was the destination file. Can show + between 0 and Content-Length bytes copied. + :ivar datetime completion_time: + Conclusion time of the last attempted Copy File operation where this file was the + destination file. 
This value can specify the time of a completed, aborted, or
+        failed copy attempt.
+    :ivar str status_description:
+        Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
+        or non-fatal copy operation failure.
+    :ivar bool incremental_copy:
+        Whether the copy is an incremental copy, in which the snapshot of the
+        source file is copied such that only the differential changes between
+        the previously copied snapshot and the destination are transferred.
+    :ivar datetime destination_snapshot:
+        Included if the file is an incremental copy or an incremental copy snapshot,
+        and x-ms-copy-status is success. Snapshot time of the last successful
+        incremental copy snapshot for this file.
+    """
+
+    def __init__(self, **kwargs):
+        self.id = kwargs.get('x-ms-copy-id')
+        self.source = kwargs.get('x-ms-copy-source')
+        self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
+        self.progress = kwargs.get('x-ms-copy-progress')
+        self.completion_time = kwargs.get('x-ms-copy-completion-time')
+        self.status_description = kwargs.get('x-ms-copy-status-description')
+        self.incremental_copy = kwargs.get('x-ms-incremental-copy')
+        self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
+
+    @classmethod
+    def _from_generated(cls, generated):
+        copy = cls()
+        copy.id = generated.properties.copy_id or None
+        copy.status = get_enum_value(generated.properties.copy_status) or None
+        copy.source = generated.properties.copy_source or None
+        copy.progress = generated.properties.copy_progress or None
+        copy.completion_time = generated.properties.copy_completion_time or None
+        copy.status_description = generated.properties.copy_status_description or None
+        copy.incremental_copy = generated.properties.incremental_copy or None
+        copy.destination_snapshot = generated.properties.destination_snapshot or None
+        return copy
+
+
+class FileSasPermissions(object):
+    """FileSasPermissions class to be used with
+    generating shared access signature operations.
+
+    :param bool read:
+        Read the content, properties, metadata. Use the file as the source of a copy
+        operation.
+    :param bool create:
+        Create a new file or copy a file to a new file.
+    :param bool write:
+        Create or write content, properties, metadata. Resize the file. Use the file
+        as the destination of a copy operation within the same account.
+    :param bool delete:
+        Delete the file.
+    """
+    def __init__(self, read=False, create=False, write=False, delete=False):
+        self.read = read
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self._str = (('r' if self.read else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a FileSasPermissions from a string.
+
+        To specify read, create, write, or delete permissions you need only to
+        include the first letter of the word in the string. E.g. for read and
+        create permissions, you would provide a string "rc".
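+
+        A minimal sketch of the round trip (``FileSasPermissions`` as defined
+        in this module):
+
+        .. code-block:: python
+
+            permissions = FileSasPermissions.from_string("rc")
+            assert permissions.read and permissions.create
+            assert str(permissions) == "rc"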
+
+        :param str permission: The string which dictates the read, create,
+            write, or delete permissions
+        :return: A FileSasPermissions object
+        :rtype: ~azure.storage.fileshare.FileSasPermissions
+        """
+        p_read = 'r' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+
+        parsed = cls(p_read, p_create, p_write, p_delete)
+
+        return parsed
+
+
+class ShareSasPermissions(object):
+    """ShareSasPermissions class to be used with
+    generating shared access signature and access policy operations.
+
+    :param bool read:
+        Read the content, properties or metadata of any file in the share. Use any
+        file in the share as the source of a copy operation.
+    :param bool write:
+        For any file in the share, create or write content, properties or metadata.
+        Resize the file. Use the file as the destination of a copy operation within
+        the same account.
+        Note: You cannot grant permissions to read or write share properties or
+        metadata with a service SAS. Use an account SAS instead.
+    :param bool delete:
+        Delete any file in the share.
+        Note: You cannot grant permissions to delete a share with a service SAS. Use
+        an account SAS instead.
+    :param bool list:
+        List files and directories in the share.
+    """
+    def __init__(self, read=False, write=False, delete=False, list=False):  # pylint: disable=redefined-builtin
+        self.read = read
+        self.write = write
+        self.delete = delete
+        self.list = list
+        self._str = (('r' if self.read else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('l' if self.list else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a ShareSasPermissions from a string.
+
+        To specify read, write, delete, or list permissions you need only to
+        include the first letter of the word in the string. E.g. for read and
+        write permissions, you would provide a string "rw".
+
+        :param str permission: The string which dictates the read, write,
+            delete, or list permissions
+        :return: A ShareSasPermissions object
+        :rtype: ~azure.storage.fileshare.ShareSasPermissions
+        """
+        p_read = 'r' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_list = 'l' in permission
+
+        parsed = cls(p_read, p_write, p_delete, p_list)
+
+        return parsed
+
+
+class NTFSAttributes(object):
+    """
+    The valid set of attributes to set for a file or directory.
+    When setting attributes on a directory, 'Directory' should always be
+    enabled, except when clearing all attributes with 'None'.
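+
+    A minimal sketch of composing attributes (names as defined on this class):
+
+    .. code-block:: python
+
+        attributes = NTFSAttributes(read_only=True, archive=True)
+        assert str(attributes) == "ReadOnly|Archive"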
+
+    :ivar bool read_only:
+        Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE
+    :ivar bool hidden:
+        Enable/disable 'Hidden' attribute for DIRECTORY or FILE
+    :ivar bool system:
+        Enable/disable 'System' attribute for DIRECTORY or FILE
+    :ivar bool none:
+        Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY
+    :ivar bool directory:
+        Enable/disable 'Directory' attribute for DIRECTORY
+    :ivar bool archive:
+        Enable/disable 'Archive' attribute for DIRECTORY or FILE
+    :ivar bool temporary:
+        Enable/disable 'Temporary' attribute for FILE
+    :ivar bool offline:
+        Enable/disable 'Offline' attribute for DIRECTORY or FILE
+    :ivar bool not_content_indexed:
+        Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE
+    :ivar bool no_scrub_data:
+        Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE
+    """
+    def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False,
+                 temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False):
+
+        self.read_only = read_only
+        self.hidden = hidden
+        self.system = system
+        self.none = none
+        self.directory = directory
+        self.archive = archive
+        self.temporary = temporary
+        self.offline = offline
+        self.not_content_indexed = not_content_indexed
+        self.no_scrub_data = no_scrub_data
+        self._str = (('ReadOnly|' if self.read_only else '') +
+                     ('Hidden|' if self.hidden else '') +
+                     ('System|' if self.system else '') +
+                     ('None|' if self.none else '') +
+                     ('Directory|' if self.directory else '') +
+                     ('Archive|' if self.archive else '') +
+                     ('Temporary|' if self.temporary else '') +
+                     ('Offline|' if self.offline else '') +
+                     ('NotContentIndexed|' if self.not_content_indexed else '') +
+                     ('NoScrubData|' if self.no_scrub_data else ''))
+
+    def __str__(self):
+        concatenated_params = self._str
+        return concatenated_params.strip('|')
+
+    @classmethod
+    def from_string(cls, string):
+        """Create an NTFSAttributes from a string.
+
+        To specify attributes, pass in a string with the desired
+        attributes, e.g. "ReadOnly|Hidden|System"
+
+        :param str string: The string which dictates the attributes.
+        :return: An NTFSAttributes object
+        :rtype: ~azure.storage.fileshare.NTFSAttributes
+        """
+        read_only = "ReadOnly" in string
+        hidden = "Hidden" in string
+        system = "System" in string
+        none = "None" in string
+        directory = "Directory" in string
+        archive = "Archive" in string
+        temporary = "Temporary" in string
+        offline = "Offline" in string
+        not_content_indexed = "NotContentIndexed" in string
+        no_scrub_data = "NoScrubData" in string
+
+        parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed,
+                     no_scrub_data)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
+
+
+def service_properties_deserialize(generated):
+    """Deserialize a generated ServiceProperties object into a dict.
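+
+    The returned dict maps the keys 'hour_metrics', 'minute_metrics', 'cors'
+    and 'protocol' to the corresponding wrapper models, mirroring the fields
+    of the generated object.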
+ """ + return { + 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access + 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access + 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access + 'protocol': ShareProtocolSettings._from_generated(generated.protocol), # pylint: disable=protected-access + } diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_parser.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_parser.py new file mode 100644 index 0000000..db7cab5 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_parser.py @@ -0,0 +1,42 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import datetime, timedelta + +_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time' +_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. file_permission should be <=8KB, else' \ + 'please use file_permission_key' + + +def _get_file_permission(file_permission, file_permission_key, default_permission): + # if file_permission and file_permission_key are both empty, then use the default_permission + # value as file permission, file_permission size should be <= 8KB, else file permission_key should be used + if file_permission and len(str(file_permission).encode('utf-8')) > 8 * 1024: + raise ValueError(_FILE_PERMISSION_TOO_LONG) + + if not file_permission: + if not file_permission_key: + return default_permission + return None + + if not file_permission_key: + return file_permission + + raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS) + + +def _parse_datetime_from_str(string_datetime): + if not string_datetime: + return None + dt, _, us = string_datetime.partition(".") + dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S") + us = int(us[:-2]) # microseconds + datetime_obj = dt + timedelta(microseconds=us) + return datetime_obj + + +def _datetime_to_str(datetime_obj): + return datetime_obj if isinstance(datetime_obj, str) else datetime_obj.isoformat() + '0Z' diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_serialize.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_serialize.py new file mode 100644 index 0000000..9a050da --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_serialize.py @@ -0,0 +1,119 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from azure.core import MatchConditions
+
+from ._parser import _datetime_to_str, _get_file_permission
+from ._generated.models import SourceModifiedAccessConditions, LeaseAccessConditions, CopyFileSmbInfo
+
+
+_SUPPORTED_API_VERSIONS = [
+    '2019-02-02',
+    '2019-07-07',
+    '2019-10-10',
+    '2019-12-12',
+    '2020-02-10',
+    '2020-04-08',
+    '2020-06-12',
+    '2020-08-04',
+    '2020-10-02'
+]
+
+
+def _get_match_headers(kwargs, match_param, etag_param):
+    # type: (Dict[str, Any], str, str) -> Tuple[Optional[str], Optional[str]]
+    # TODO: extract this method to the shared folder and add some comments, so that share, datalake and blob can use it.
+    if_match = None
+    if_none_match = None
+    match_condition = kwargs.pop(match_param, None)
+    if match_condition == MatchConditions.IfNotModified:
+        if_match = kwargs.pop(etag_param, None)
+        if not if_match:
+            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+    elif match_condition == MatchConditions.IfPresent:
+        if_match = '*'
+    elif match_condition == MatchConditions.IfModified:
+        if_none_match = kwargs.pop(etag_param, None)
+        if not if_none_match:
+            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+    elif match_condition == MatchConditions.IfMissing:
+        if_none_match = '*'
+    elif match_condition is None:
+        if etag_param in kwargs:
+            raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param))
+    else:
+        raise TypeError("Invalid match condition: {}".format(match_condition))
+    return if_match, if_none_match
+
+
+def get_source_conditions(kwargs):
+    # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
+    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
+    return SourceModifiedAccessConditions(
+        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
+        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
+        source_if_match=if_match or kwargs.pop('source_if_match', None),
+        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
+    )
+
+
+def get_access_conditions(lease):
+    # type: (Optional[Union[ShareLeaseClient, str]]) -> Union[LeaseAccessConditions, None]
+    try:
+        lease_id = lease.id  # type: ignore
+    except AttributeError:
+        lease_id = lease  # type: ignore
+    return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+
+def get_smb_properties(kwargs):
+    # type: (Dict[str, Any]) -> Dict[str, Any]
+    ignore_read_only = kwargs.pop('ignore_read_only', None)
+    set_archive_attribute = kwargs.pop('set_archive_attribute', None)
+    file_permission = kwargs.pop('file_permission', None)
+    file_permission_key = kwargs.pop('permission_key', None)
+    file_attributes = kwargs.pop('file_attributes', None)
+    file_creation_time = kwargs.pop('file_creation_time', None) or ""
+    file_last_write_time = kwargs.pop('file_last_write_time', None) or ""
+
+    file_permission_copy_mode = None
+    file_permission = _get_file_permission(file_permission, file_permission_key, None)
+
+    if file_permission:
+        if file_permission.lower() == "source":
+            file_permission = None
+            file_permission_copy_mode = "source"
+        else:
+            file_permission_copy_mode = "override"
+    elif file_permission_key:
+        if file_permission_key.lower() == "source":
+            file_permission_key = None
+            file_permission_copy_mode = "source"
+        else:
+            file_permission_copy_mode = "override"
+    return {
+        'file_permission': file_permission,
+
'file_permission_key': file_permission_key, + 'copy_file_smb_info': CopyFileSmbInfo( + file_permission_copy_mode=file_permission_copy_mode, + ignore_read_only=ignore_read_only, + file_attributes=file_attributes, + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + set_archive_attribute=set_archive_attribute + ) + + } + + +def get_api_version(kwargs): + # type: (Dict[str, Any]) -> str + api_version = kwargs.get('api_version', None) + if api_version and api_version not in _SUPPORTED_API_VERSIONS: + versions = '\n'.join(_SUPPORTED_API_VERSIONS) + raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) + return api_version or _SUPPORTED_API_VERSIONS[-1] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_client.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_client.py new file mode 100644 index 0000000..a0b317b --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_client.py @@ -0,0 +1,909 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Optional, Union, Dict, Any, Iterable, TYPE_CHECKING +) + + +try: + from urllib.parse import urlparse, quote, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import quote, unquote # type: ignore + +import six +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import Pipeline +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, serialize_iso +from ._shared.response_handlers import ( + return_response_headers, + process_storage_error, + return_headers_and_deserialized) +from ._generated import AzureFileStorage +from ._generated.models import ( + SignedIdentifier, + DeleteSnapshotsOptionType, + SharePermission) +from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission +from ._serialize import get_api_version, get_access_conditions +from ._directory_client import ShareDirectoryClient +from ._file_client import ShareFileClient +from ._lease import ShareLeaseClient +from ._models import ShareProtocols + + +if TYPE_CHECKING: + from ._models import ShareProperties, AccessPolicy + + +class ShareClient(StorageAccountHostsMixin): + """A client to interact with a specific share, although that share may not yet exist. + + For operations relating to a specific directory or file in this share, the clients for + those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the share, + use the :func:`from_share_url` classmethod. + :param share_name: + The name of the share with which to interact. + :type share_name: str + :param str snapshot: + An optional share snapshot on which to operate. 
This can be the snapshot ID string
+        or the response returned from :func:`create_snapshot`.
+    :param credential:
+        The credential with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential from azure.core.credentials or an account
+        shared access key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is '2020-10-02'.
+        Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+    """
+    def __init__(  # type: ignore
+            self, account_url,  # type: str
+            share_name,  # type: str
+            snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+            credential=None,  # type: Optional[Any]
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> None
+        try:
+            if not account_url.lower().startswith('http'):
+                account_url = "https://" + account_url
+        except AttributeError:
+            raise ValueError("Account URL must be a string.")
+        parsed_url = urlparse(account_url.rstrip('/'))
+        if not share_name:
+            raise ValueError("Please specify a share name.")
+        if not parsed_url.netloc:
+            raise ValueError("Invalid URL: {}".format(account_url))
+        if hasattr(credential, 'get_token'):
+            raise ValueError("Token credentials not supported by the File service.")
+
+        path_snapshot = None
+        path_snapshot, sas_token = parse_query(parsed_url.query)
+        if not sas_token and not credential:
+            raise ValueError(
+                'You need to provide either an account shared key or SAS token when creating a storage service.')
+        try:
+            self.snapshot = snapshot.snapshot  # type: ignore
+        except AttributeError:
+            try:
+                self.snapshot = snapshot['snapshot']  # type: ignore
+            except TypeError:
+                self.snapshot = snapshot or path_snapshot
+
+        self.share_name = share_name
+        self._query_str, credential = self._format_query_string(
+            sas_token, credential, share_snapshot=self.snapshot)
+        super(ShareClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs)
+        self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # pylint: disable=protected-access
+
+    @classmethod
+    def from_share_url(cls, share_url,  # type: str
+                       snapshot=None,  # type: Optional[Union[str, Dict[str, Any]]]
+                       credential=None,  # type: Optional[Any]
+                       **kwargs  # type: Any
+                       ):
+        # type: (...) -> ShareClient
+        """
+        :param str share_url: The full URI to the share.
+        :param str snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :param credential:
+            The credential with which to authenticate. This is optional if the
+            account URL already has a SAS token. The value can be a SAS token string,
+            an instance of an AzureSasCredential from azure.core.credentials or an account
+            shared access key.
+        :returns: A share client.
+ :rtype: ~azure.storage.fileshare.ShareClient + """ + try: + if not share_url.lower().startswith('http'): + share_url = "https://" + share_url + except AttributeError: + raise ValueError("Share URL must be a string.") + parsed_url = urlparse(share_url.rstrip('/')) + if not (parsed_url.path and parsed_url.netloc): + raise ValueError("Invalid URL: {}".format(share_url)) + + share_path = parsed_url.path.lstrip('/').split('/') + account_path = "" + if len(share_path) > 1: + account_path = "/" + "/".join(share_path[:-1]) + account_url = "{}://{}{}?{}".format( + parsed_url.scheme, + parsed_url.netloc.rstrip('/'), + account_path, + parsed_url.query) + + share_name = unquote(share_path[-1]) + path_snapshot, _ = parse_query(parsed_url.query) + if snapshot: + try: + path_snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + path_snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + path_snapshot = snapshot + + if not share_name: + raise ValueError("Invalid URL. Please provide a URL with a valid share name") + return cls(account_url, share_name, path_snapshot, credential, **kwargs) + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + share_name = self.share_name + if isinstance(share_name, six.text_type): + share_name = share_name.encode('UTF-8') + return "{}://{}/{}{}".format( + self.scheme, + hostname, + quote(share_name), + self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + share_name, # type: str + snapshot=None, # type: Optional[str] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> ShareClient + """Create ShareClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param share_name: The name of the share. + :type share_name: str + :param str snapshot: + The optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials or an account + shared access key. + :returns: A share client. + :rtype: ~azure.storage.fileshare.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START create_share_client_from_conn_string] + :end-before: [END create_share_client_from_conn_string] + :language: python + :dedent: 8 + :caption: Gets the share client from connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs) + + def get_directory_client(self, directory_path=None): + # type: (Optional[str]) -> ShareDirectoryClient + """Get a client to interact with the specified directory. + The directory need not already exist. + + :param str directory_path: + Path to the specified directory. + :returns: A Directory Client. 
+ :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, + _location_mode=self._location_mode) + + def get_file_client(self, file_path): + # type: (str) -> ShareFileClient + """Get a client to interact with the specified file. + The file need not already exist. + + :param str file_path: + Path to the specified file. + :returns: A File Client. + :rtype: ~azure.storage.fileshare.ShareFileClient + """ + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareFileClient( + self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode) + + @distributed_trace + def acquire_lease(self, **kwargs): + # type: (**Any) -> ShareLeaseClient + """Requests a new lease. + + If the share does not have an active lease, the Share + Service creates a lease on the share and returns a new lease. + + .. versionadded:: 12.5.0 + + :keyword int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword str lease_id: + Proposed lease ID, in a GUID string format. The Share Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A ShareLeaseClient object. + :rtype: ~azure.storage.fileshare.ShareLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START acquire_and_release_lease_on_share] + :end-before: [END acquire_and_release_lease_on_share] + :language: python + :dedent: 8 + :caption: Acquiring a lease on a share. + """ + kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) + lease_id = kwargs.pop('lease_id', None) + lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore + lease.acquire(**kwargs) + return lease + + @distributed_trace + def create_share(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new Share under the account. If a share with the + same name already exists, the operation fails. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int quota: + The quota to be allotted. + :keyword access_tier: + Specifies the access tier of the share. + Possible values: 'TransactionOptimized', 'Hot', 'Cool' + :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword protocols: + Protocols to enable on the share. Only one protocol can be enabled on the share. 
+ :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols + :keyword root_squash: + Root squash to set on the share. + Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. + :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START create_share] + :end-before: [END create_share] + :language: python + :dedent: 8 + :caption: Creates a file share. + """ + metadata = kwargs.pop('metadata', None) + quota = kwargs.pop('quota', None) + access_tier = kwargs.pop('access_tier', None) + timeout = kwargs.pop('timeout', None) + root_squash = kwargs.pop('root_squash', None) + protocols = kwargs.pop('protocols', None) + if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]: + raise ValueError("The enabled protocol must be set to either SMB or NFS.") + if root_squash and protocols not in ['NFS', ShareProtocols.NFS]: + raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.") + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + + try: + return self._client.share.create( # type: ignore + timeout=timeout, + metadata=metadata, + quota=quota, + access_tier=access_tier, + root_squash=root_squash, + enabled_protocols=protocols, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def create_snapshot( # type: ignore + self, + **kwargs # type: Optional[Any] + ): + # type: (...) -> Dict[str, Any] + """Creates a snapshot of the share. + + A snapshot is a read-only version of a share that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a share as it appears at a moment in time. + + A snapshot of a share has the same name as the base share from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START create_share_snapshot] + :end-before: [END create_share_snapshot] + :language: python + :dedent: 12 + :caption: Creates a snapshot of the file share. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return self._client.share.create_snapshot( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def delete_share( + self, delete_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> None + """Marks the specified share for deletion. The share is + later deleted during garbage collection. + + :param bool delete_snapshots: + Indicates if snapshots are to be deleted. + :keyword lease: + Required if the share has an active lease. 
Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START delete_share] + :end-before: [END delete_share] + :language: python + :dedent: 12 + :caption: Deletes the share and any snapshots. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + delete_include = None + if delete_snapshots: + delete_include = DeleteSnapshotsOptionType.include + try: + self._client.share.delete( + timeout=timeout, + sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, + delete_snapshots=delete_include, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_share_properties(self, **kwargs): + # type: (Any) -> ShareProperties + """Returns all user-defined metadata and system properties for the + specified share. The data returned does not include the shares's + list of files or directories. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: The share properties. + :rtype: ~azure.storage.fileshare.ShareProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_hello_world.py + :start-after: [START get_share_properties] + :end-before: [END get_share_properties] + :language: python + :dedent: 12 + :caption: Gets the share properties. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + props = self._client.share.get_properties( + timeout=timeout, + sharesnapshot=self.snapshot, + cls=deserialize_share_properties, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + props.name = self.share_name + props.snapshot = self.snapshot + return props # type: ignore + + @distributed_trace + def set_share_quota(self, quota, **kwargs): + # type: (int, Any) -> Dict[str, Any] + """Sets the quota for the share. + + :param int quota: + Specifies the maximum size of the share, in gigabytes. + Must be greater than 0, and less than or equal to 5TB. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START set_share_quota] + :end-before: [END set_share_quota] + :language: python + :dedent: 12 + :caption: Sets the share quota. 
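+
+        An equivalent inline sketch (``share`` is an assumed, already
+        authenticated ShareClient):
+
+        .. code-block:: python
+
+            share.set_share_quota(quota=2)  # resize the share to 2 GB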
+ """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + return self._client.share.set_properties( # type: ignore + timeout=timeout, + quota=quota, + access_tier=None, + lease_access_conditions=access_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_share_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Sets the share properties. + + .. versionadded:: 12.4.0 + + :keyword access_tier: + Specifies the access tier of the share. + Possible values: 'TransactionOptimized', 'Hot', and 'Cool' + :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + :keyword int quota: + Specifies the maximum size of the share, in gigabytes. + Must be greater than 0, and less than or equal to 5TB. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword root_squash: + Root squash to set on the share. + Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. + :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START set_share_properties] + :end-before: [END set_share_properties] + :language: python + :dedent: 12 + :caption: Sets the share properties. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + access_tier = kwargs.pop('access_tier', None) + quota = kwargs.pop('quota', None) + root_squash = kwargs.pop('root_squash', None) + if all(parameter is None for parameter in [access_tier, quota, root_squash]): + raise ValueError("set_share_properties should be called with at least one parameter.") + try: + return self._client.share.set_properties( # type: ignore + timeout=timeout, + quota=quota, + access_tier=access_tier, + root_squash=root_squash, + lease_access_conditions=access_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_share_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the share. + + Each call to this operation replaces all existing metadata + attached to the share. To remove all metadata from the share, + call this operation with no metadata dict. + + :param metadata: + Name-value pairs associated with the share as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START set_share_metadata] + :end-before: [END set_share_metadata] + :language: python + :dedent: 12 + :caption: Sets the share metadata. 
+ """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return self._client.share.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_share_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the share. The permissions + indicate whether files in a share may be accessed publicly. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = self._client.share.get_access_policy( + timeout=timeout, + cls=return_headers_and_deserialized, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return { + 'public_access': response.get('share_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace + def set_share_access_policy(self, signed_identifiers, **kwargs): + # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] + """Sets the permissions for the share, or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether files in a share may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the share. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. 
The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) + signed_identifiers = identifiers # type: ignore + try: + return self._client.share.set_access_policy( # type: ignore + share_acl=signed_identifiers or None, + timeout=timeout, + cls=return_response_headers, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_share_stats(self, **kwargs): + # type: (Any) -> int + """Gets the approximate size of the data stored on the share in bytes. + + Note that this value may not include all recently created + or recently re-sized files. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :return: The approximate size of the data (in bytes) stored on the share. + :rtype: int + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + stats = self._client.share.get_statistics( + timeout=timeout, + lease_access_conditions=access_conditions, + **kwargs) + return stats.share_usage_bytes # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files( + self, directory_name=None, # type: Optional[str] + name_starts_with=None, # type: Optional[str] + marker=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Iterable[Dict[str,str]] + """Lists the directories and files under the share. + + :param str directory_name: + Name of a directory. + :param str name_starts_with: + Filters the results to return only directories whose names + begin with the specified prefix. + :param str marker: + An opaque continuation token. This value can be retrieved from the + next_marker field of a previous generator object. If specified, + this generator will begin returning results from this point. + :keyword list[str] include: + Include this parameter to specify one or more datasets to include in the response. + Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword bool include_extended_info: + If this is set to true, file id will be returned in listed results. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START share_list_files_in_dir] + :end-before: [END share_list_files_in_dir] + :language: python + :dedent: 12 + :caption: List directories and files in the share. 
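+
+        An equivalent inline sketch (``share`` is an assumed, already
+        authenticated ShareClient):
+
+        .. code-block:: python
+
+            for item in share.list_directories_and_files(name_starts_with="logs"):
+                print(item["name"], item["is_directory"])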
+ """ + timeout = kwargs.pop('timeout', None) + directory = self.get_directory_client(directory_name) + kwargs.setdefault('merge_span', True) + return directory.list_directories_and_files( + name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) + + @staticmethod + def _create_permission_for_share_options(file_permission, # type: str + **kwargs): + options = { + 'share_permission': SharePermission(permission=file_permission), + 'cls': deserialize_permission_key, + 'timeout': kwargs.pop('timeout', None), + } + options.update(kwargs) + return options + + @distributed_trace + def create_permission_for_share(self, file_permission, # type: str + **kwargs # type: Any + ): + # type: (...) -> str + """Create a permission (a security descriptor) at the share level. + + This 'permission' can be used for the files/directories in the share. + If a 'permission' already exists, it shall return the key of it, else + creates a new permission at the share level and return its key. + + :param str file_permission: + File permission, a Portable SDDL + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A file permission key + :rtype: str + """ + timeout = kwargs.pop('timeout', None) + options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) + try: + return self._client.share.create_permission(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_permission_for_share( # type: ignore + self, permission_key, # type: str + **kwargs # type: Any + ): + # type: (...) -> str + """Get a permission (a security descriptor) for a given key. + + This 'permission' can be used for the files/directories in the share. + + :param str permission_key: + Key of the file permission to retrieve + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A file permission (a portable SDDL) + :rtype: str + """ + timeout = kwargs.pop('timeout', None) + try: + return self._client.share.get_permission( # type: ignore + file_permission_key=permission_key, + cls=deserialize_permission, + timeout=timeout, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def create_directory(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Creates a directory in the share and returns a client to interact + with the directory. + + :param str directory_name: + The name of the directory. + :keyword metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + directory = self.get_directory_client(directory_name) + kwargs.setdefault('merge_span', True) + directory.create_directory(**kwargs) + return directory # type: ignore + + @distributed_trace + def delete_directory(self, directory_name, **kwargs): + # type: (str, Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :param str directory_name: + The name of the directory. + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :rtype: None + """ + directory = self.get_directory_client(directory_name) + directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_service_client.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_service_client.py new file mode 100644 index 0000000..537a0e5 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_share_service_client.py @@ -0,0 +1,423 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Dict, List, + TYPE_CHECKING +) + + +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse # type: ignore + +from azure.core.exceptions import HttpResponseError +from azure.core.paging import ItemPaged +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import Pipeline +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.response_handlers import process_storage_error +from ._generated import AzureFileStorage +from ._generated.models import StorageServiceProperties +from ._share_client import ShareClient +from ._serialize import get_api_version +from ._models import ( + SharePropertiesPaged, + service_properties_deserialize, +) + +if TYPE_CHECKING: + from datetime import datetime + from ._models import ( + ShareProperties, + Metrics, + CorsRule, + ShareProtocolSettings + ) + + +class ShareServiceClient(StorageAccountHostsMixin): + """A client to interact with the File Share Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete shares within the account. + For operations relating to a specific share, a client for that entity + can also be retrieved using the :func:`get_share_client` function. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URL to the file share storage account. Any other entities included + in the URL path (e.g. share or file) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START create_share_service_client] + :end-before: [END create_share_service_client] + :language: python + :dedent: 8 + :caption: Create the share service client with url and credential. 
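+
+    A minimal inline sketch (editor's addition; the account name, key and URL are
+    placeholders, and the import path follows the upstream package layout):
+
+    .. code-block:: python
+
+        from azure.storage.fileshare import ShareServiceClient
+
+        service = ShareServiceClient(
+            account_url="https://myaccount.file.core.windows.net",
+            credential="<account-shared-key>"
+        )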
+ """ + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + if hasattr(credential, 'get_token'): + raise ValueError("Token credentials not supported by the File Share service.") + + _, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + self._query_str, credential = self._format_query_string(sas_token, credential) + super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + return "{}://{}/{}".format(self.scheme, hostname, self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): # type: (...) -> ShareServiceClient + """Create ShareServiceClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials or an account + shared access key. + :returns: A File Share service client. + :rtype: ~azure.storage.fileshare.ShareServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START create_share_service_client_from_conn_string] + :end-before: [END create_share_service_client_from_conn_string] + :language: python + :dedent: 8 + :caption: Create the share service client with connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls(account_url, credential=credential, **kwargs) + + @distributed_trace + def get_service_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the properties of a storage account's File Share service, including + Azure Storage Analytics. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A dictionary containing file service properties such as + analytics logging, hour/minute metrics, cors rules, etc. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START get_service_properties] + :end-before: [END get_service_properties] + :language: python + :dedent: 8 + :caption: Get file share service properties. 
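+
+        A minimal inline sketch (editor's addition; assumes an authenticated
+        ``ShareServiceClient`` named ``service``):
+
+        .. code-block:: python
+
+            props = service.get_service_properties()
+            print(props['hour_metrics'], props['minute_metrics'], props['cors'])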
+ """ + timeout = kwargs.pop('timeout', None) + try: + service_props = self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_service_properties( + self, hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + protocol=None, # type: Optional[ShareProtocolSettings], + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's File Share service, including + Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the + existing settings on the service for that functionality are preserved. + + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for files. + :type hour_metrics: ~azure.storage.fileshare.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for files. + :type minute_metrics: ~azure.storage.fileshare.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) + :param protocol: + Sets protocol settings + :type protocol: ~azure.storage.fileshare.ShareProtocolSettings + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START set_service_properties] + :end-before: [END set_service_properties] + :language: python + :dedent: 8 + :caption: Sets file share service properties. + """ + timeout = kwargs.pop('timeout', None) + props = StorageServiceProperties( + hour_metrics=hour_metrics, + minute_metrics=minute_metrics, + cors=cors, + protocol=protocol + ) + try: + self._client.service.set_properties(storage_service_properties=props, timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_shares( + self, name_starts_with=None, # type: Optional[str] + include_metadata=False, # type: Optional[bool] + include_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> ItemPaged[ShareProperties] + """Returns auto-paging iterable of dict-like ShareProperties under the specified account. + The generator will lazily follow the continuation tokens returned by + the service and stop when all shares have been returned. + + :param str name_starts_with: + Filters the results to return only shares whose names + begin with the specified name_starts_with. + :param bool include_metadata: + Specifies that share metadata be returned in the response. + :param bool include_snapshots: + Specifies that share snapshot be returned in the response. + :keyword bool include_deleted: + Specifies that deleted shares be returned in the response. + This is only for share soft delete enabled account. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) of ShareProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties] + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_service.py + :start-after: [START fsc_list_shares] + :end-before: [END fsc_list_shares] + :language: python + :dedent: 12 + :caption: List shares in the file share service. + """ + timeout = kwargs.pop('timeout', None) + include = [] + include_deleted = kwargs.pop('include_deleted', None) + if include_deleted: + include.append("deleted") + if include_metadata: + include.append('metadata') + if include_snapshots: + include.append('snapshots') + + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.service.list_shares_segment, + include=include, + timeout=timeout, + **kwargs) + return ItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=SharePropertiesPaged) + + @distributed_trace + def create_share( + self, share_name, # type: str + **kwargs + ): + # type: (...) -> ShareClient + """Creates a new share under the specified account. If the share + with the same name already exists, the operation fails. Returns a client with + which to interact with the newly created share. + + :param str share_name: The name of the share to create. + :keyword dict(str,str) metadata: + A dict with name_value pairs to associate with the + share as metadata. Example:{'Category':'test'} + :keyword int quota: + Quota in bytes. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.fileshare.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START fsc_create_shares] + :end-before: [END fsc_create_shares] + :language: python + :dedent: 8 + :caption: Create a share in the file share service. + """ + metadata = kwargs.pop('metadata', None) + quota = kwargs.pop('quota', None) + timeout = kwargs.pop('timeout', None) + share = self.get_share_client(share_name) + kwargs.setdefault('merge_span', True) + share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) + return share + + @distributed_trace + def delete_share( + self, share_name, # type: Union[ShareProperties, str] + delete_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> None + """Marks the specified share for deletion. The share is + later deleted during garbage collection. + + :param share_name: + The share to delete. This can either be the name of the share, + or an instance of ShareProperties. + :type share_name: str or ~azure.storage.fileshare.ShareProperties + :param bool delete_snapshots: + Indicates if snapshots are to be deleted. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START fsc_delete_shares] + :end-before: [END fsc_delete_shares] + :language: python + :dedent: 12 + :caption: Delete a share in the file share service. + """ + timeout = kwargs.pop('timeout', None) + share = self.get_share_client(share_name) + kwargs.setdefault('merge_span', True) + share.delete_share( + delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) + + @distributed_trace + def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): + # type: (str, str, **Any) -> ShareClient + """Restores soft-deleted share. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + .. versionadded:: 12.2.0 + This operation was introduced in API version '2019-12-12'. 
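+
+        A minimal inline sketch (editor's addition; assumes share soft delete is
+        enabled on the account and ``service`` is an authenticated
+        ``ShareServiceClient``):
+
+        .. code-block:: python
+
+            for share in service.list_shares(include_deleted=True):
+                if share.deleted:
+                    restored = service.undelete_share(share.name, share.version)
+                    break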
+ + :param str deleted_share_name: + Specifies the name of the deleted share to restore. + :param str deleted_share_version: + Specifies the version of the deleted share to restore. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.fileshare.ShareClient + """ + share = self.get_share_client(deleted_share_name) + + try: + share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access + deleted_share_version=deleted_share_version, + timeout=kwargs.pop('timeout', None), **kwargs) + return share + except HttpResponseError as error: + process_storage_error(error) + + def get_share_client(self, share, snapshot=None): + # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient + """Get a client to interact with the specified share. + The share need not already exist. + + :param share: + The share. This can either be the name of the share, + or an instance of ShareProperties. + :type share: str or ~azure.storage.fileshare.ShareProperties + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :returns: A ShareClient. + :rtype: ~azure.storage.fileshare.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START get_share_client] + :end-before: [END get_share_client] + :language: python + :dedent: 8 + :caption: Gets the share client. + """ + try: + share_name = share.name + except AttributeError: + share_name = share + + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareClient( + self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, + api_version=self.api_version, _hosts=self._hosts, + _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/__init__.py new file mode 100644 index 0000000..160f882 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/__init__.py @@ -0,0 +1,56 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import base64 +import hashlib +import hmac + +try: + from urllib.parse import quote, unquote +except ImportError: + from urllib2 import quote, unquote # type: ignore + +import six + + +def url_quote(url): + return quote(url) + + +def url_unquote(url): + return unquote(url) + + +def encode_base64(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def decode_base64_to_bytes(data): + if isinstance(data, six.text_type): + data = data.encode('utf-8') + return base64.b64decode(data) + + +def decode_base64_to_text(data): + decoded_bytes = decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + +def sign_string(key, string_to_sign, key_is_base64=True): + if key_is_base64: + key = decode_base64_to_bytes(key) + else: + if isinstance(key, six.text_type): + key = key.encode('utf-8') + if isinstance(string_to_sign, six.text_type): + string_to_sign = string_to_sign.encode('utf-8') + signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) + digest = signed_hmac_sha256.digest() + encoded_digest = encode_base64(digest) + return encoded_digest diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/authentication.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/authentication.py new file mode 100644 index 0000000..d04c1e4 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/authentication.py @@ -0,0 +1,142 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +import sys + +try: + from urllib.parse import urlparse, unquote +except ImportError: + from urlparse import urlparse # type: ignore + from urllib2 import unquote # type: ignore + +try: + from yarl import URL +except ImportError: + pass + +try: + from azure.core.pipeline.transport import AioHttpTransport +except ImportError: + AioHttpTransport = None + +from azure.core.exceptions import ClientAuthenticationError +from azure.core.pipeline.policies import SansIOHTTPPolicy + +from . import sign_string + + +logger = logging.getLogger(__name__) + + + +# wraps a given exception with the desired exception type +def _wrap_exception(ex, desired_type): + msg = "" + if ex.args: + msg = ex.args[0] + if sys.version_info >= (3,): + # Automatic chaining in Python 3 means we keep the trace + return desired_type(msg) + # There isn't a good solution in 2 for keeping the stack trace + # in general, or that will not result in an error in 3 + # However, we can keep the previous error type and message + # TODO: In the future we will log the trace + return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) + + +class AzureSigningError(ClientAuthenticationError): + """ + Represents a fatal error when attempting to sign a request. + In general, the cause of this exception is user error. For example, the given account key is not valid. + Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. 
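+
+    A minimal inline sketch (editor's addition) of how this error typically
+    surfaces and can be handled:
+
+    .. code-block:: python
+
+        try:
+            service.get_service_properties()
+        except AzureSigningError:
+            # Most often the account key is malformed or not valid base64.
+            raise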
+ """ + + +# pylint: disable=no-self-use +class SharedKeyCredentialPolicy(SansIOHTTPPolicy): + + def __init__(self, account_name, account_key): + self.account_name = account_name + self.account_key = account_key + super(SharedKeyCredentialPolicy, self).__init__() + + @staticmethod + def _get_headers(request, headers_to_sign): + headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) + if 'content-length' in headers and headers['content-length'] == '0': + del headers['content-length'] + return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' + + @staticmethod + def _get_verb(request): + return request.http_request.method + '\n' + + def _get_canonicalized_resource(self, request): + uri_path = urlparse(request.http_request.url).path + try: + if isinstance(request.context.transport, AioHttpTransport) or \ + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ + isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), + AioHttpTransport): + uri_path = URL(uri_path) + return '/' + self.account_name + str(uri_path) + except TypeError: + pass + return '/' + self.account_name + uri_path + + @staticmethod + def _get_canonicalized_headers(request): + string_to_sign = '' + x_ms_headers = [] + for name, value in request.http_request.headers.items(): + if name.startswith('x-ms-'): + x_ms_headers.append((name.lower(), value)) + x_ms_headers.sort() + for name, value in x_ms_headers: + if value is not None: + string_to_sign += ''.join([name, ':', value, '\n']) + return string_to_sign + + @staticmethod + def _get_canonicalized_resource_query(request): + sorted_queries = list(request.http_request.query.items()) + sorted_queries.sort() + + string_to_sign = '' + for name, value in sorted_queries: + if value is not None: + string_to_sign += '\n' + name.lower() + ':' + unquote(value) + + return string_to_sign + + def _add_authorization_header(self, request, string_to_sign): + try: + signature = sign_string(self.account_key, string_to_sign) + auth_string = 'SharedKey ' + self.account_name + ':' + signature + request.http_request.headers['Authorization'] = auth_string + except Exception as ex: + # Wrap any error that occurred as signing error + # Doing so will clarify/locate the source of problem + raise _wrap_exception(ex, AzureSigningError) + + def on_request(self, request): + string_to_sign = \ + self._get_verb(request) + \ + self._get_headers( + request, + [ + 'content-encoding', 'content-language', 'content-length', + 'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' + ] + ) + \ + self._get_canonicalized_headers(request) + \ + self._get_canonicalized_resource(request) + \ + self._get_canonicalized_resource_query(request) + + self._add_authorization_header(request, string_to_sign) + #logger.debug("String_to_sign=%s", string_to_sign) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client.py new file mode 100644 index 0000000..5e524b2 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client.py @@ -0,0 +1,459 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +import logging +import uuid +from typing import ( # pylint: disable=unused-import + Optional, + Any, + Tuple, +) + +try: + from urllib.parse import parse_qs, quote +except ImportError: + from urlparse import parse_qs # type: ignore + from urllib2 import quote # type: ignore + +import six + +from azure.core.configuration import Configuration +from azure.core.credentials import AzureSasCredential +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import RequestsTransport, HttpTransport +from azure.core.pipeline.policies import ( + RedirectPolicy, + ContentDecodePolicy, + BearerTokenCredentialPolicy, + ProxyPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + UserAgentPolicy, + AzureSasCredentialPolicy +) + +from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .models import LocationMode +from .authentication import SharedKeyCredentialPolicy +from .shared_access_signature import QueryStringConstants +from .request_handlers import serialize_batch_body, _get_batch_request_delimiter +from .policies import ( + StorageHeadersPolicy, + StorageContentValidation, + StorageRequestHook, + StorageResponseHook, + StorageLoggingPolicy, + StorageHosts, + QueueMessagePolicy, + ExponentialRetry, +) +from .._version import VERSION +from .response_handlers import process_storage_error, PartialBatchErrorException + + +_LOGGER = logging.getLogger(__name__) +_SERVICE_PARAMS = { + "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, + "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, + "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, + "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, +} + +class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + parsed_url, # type: Any + service, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) 
-> None + self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) + self._hosts = kwargs.get("_hosts") + self.scheme = parsed_url.scheme + + if service not in ["blob", "queue", "file-share", "dfs"]: + raise ValueError("Invalid service: {}".format(service)) + service_name = service.split('-')[0] + account = parsed_url.netloc.split(".{}.core.".format(service_name)) + + self.account_name = account[0] if len(account) > 1 else None + if not self.account_name and parsed_url.netloc.startswith("localhost") \ + or parsed_url.netloc.startswith("127.0.0.1"): + self.account_name = parsed_url.path.strip("/") + + self.credential = _format_shared_key_credential(self.account_name, credential) + if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): + raise ValueError("Token credential is only supported with HTTPS.") + + secondary_hostname = None + if hasattr(self.credential, "account_name"): + self.account_name = self.credential.account_name + secondary_hostname = "{}-secondary.{}.{}".format( + self.credential.account_name, service_name, SERVICE_HOST_BASE) + + if not self._hosts: + if len(account) > 1: + secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") + if kwargs.get("secondary_hostname"): + secondary_hostname = kwargs["secondary_hostname"] + primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') + self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} + + self.require_encryption = kwargs.get("require_encryption", False) + self.key_encryption_key = kwargs.get("key_encryption_key") + self.key_resolver_function = kwargs.get("key_resolver_function") + self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) + + def __enter__(self): + self._client.__enter__() + return self + + def __exit__(self, *args): + self._client.__exit__(*args) + + def close(self): + """ This method is to close the sockets opened by the client. + It need not be used when using with a context manager. + """ + self._client.close() + + @property + def url(self): + """The full endpoint URL to this entity, including SAS token if used. + + This could be either the primary endpoint, + or the secondary endpoint depending on the current :func:`location_mode`. + """ + return self._format_url(self._hosts[self._location_mode]) + + @property + def primary_endpoint(self): + """The full primary endpoint URL. + + :type: str + """ + return self._format_url(self._hosts[LocationMode.PRIMARY]) + + @property + def primary_hostname(self): + """The hostname of the primary endpoint. + + :type: str + """ + return self._hosts[LocationMode.PRIMARY] + + @property + def secondary_endpoint(self): + """The full secondary endpoint URL if configured. + + If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. + + :type: str + :raise ValueError: + """ + if not self._hosts[LocationMode.SECONDARY]: + raise ValueError("No secondary host configured.") + return self._format_url(self._hosts[LocationMode.SECONDARY]) + + @property + def secondary_hostname(self): + """The hostname of the secondary endpoint. + + If not available this will be None. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. 
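+
+        A minimal inline sketch (editor's addition; assumes the client was
+        created with a ``secondary_hostname`` keyword argument):
+
+        .. code-block:: python
+
+            if service.secondary_hostname:
+                service.location_mode = "secondary"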
+
+        :type: str or None
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :type: str
+        """
+
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError("No host URL for location mode: {}".format(value))
+
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :type: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
+        query_str = "?"
+        if snapshot:
+            query_str += "snapshot={}&".format(snapshot)
+        if share_snapshot:
+            query_str += "sharesnapshot={}&".format(share_snapshot)
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if sas_token and not credential:
+            query_str += sas_token
+        elif is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")
+            credential = None
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, "get_token"):
+            self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError("Unsupported credential: {}".format(credential))
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        config.transport = kwargs.get("transport")  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            config.transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, Pipeline(config.transport, policies=policies)
+
+    def _batch_send(
+        self,
+        *reqs,  # type: HttpRequest
+        **kwargs
+    ):
+        """Given a series of requests, do a Storage batch call.
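+
+        Editor's note: the subrequests are serialized into a single
+        multipart/mixed POST against the ``comp=batch`` endpoint, and the
+        per-subrequest responses come back as parts of the multipart body.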
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + batch_id = str(uuid.uuid1()) + + request = self._client._client.post( # pylint: disable=protected-access + url='{}://{}/{}?{}comp=batch{}{}'.format( + self.scheme, + self.primary_hostname, + kwargs.pop('path', ""), + kwargs.pop('restype', ""), + kwargs.pop('sas', ""), + kwargs.pop('timeout', "") + ), + headers={ + 'x-ms-version': self.api_version, + "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) + } + ) + + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + + request.set_multipart_mixed( + *reqs, + policies=policies, + enforce_https=False + ) + + Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access + body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) + request.set_bytes_body(body) + + temp = request.multipart_mixed_info + request.multipart_mixed_info = None + pipeline_response = self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + request.multipart_mixed_info = temp + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() + if raise_on_any_failure: + parts = list(response.parts()) + if any(p for p in parts if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts + ) + raise error + return iter(parts) + return parts + except HttpResponseError as error: + process_storage_error(error) + +class TransportWrapper(HttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. 
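+
+    A minimal inline sketch (editor's addition) of the behaviour this enables:
+
+    .. code-block:: python
+
+        with service_client:
+            share = service_client.get_share_client("myshare")
+            with share:
+                pass  # closing the inner client leaves the shared transport open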
+ """ + def __init__(self, transport): + self._transport = transport + + def send(self, request, **kwargs): + return self._transport.send(request, **kwargs) + + def open(self): + pass + + def close(self): + pass + + def __enter__(self): + pass + + def __exit__(self, *args): # pylint: disable=arguments-differ + pass + + +def _format_shared_key_credential(account_name, credential): + if isinstance(credential, six.string_types): + if not account_name: + raise ValueError("Unable to determine account name for shared key credential.") + credential = {"account_name": account_name, "account_key": credential} + if isinstance(credential, dict): + if "account_name" not in credential: + raise ValueError("Shared key credential missing 'account_name") + if "account_key" not in credential: + raise ValueError("Shared key credential missing 'account_key") + return SharedKeyCredentialPolicy(**credential) + return credential + + +def parse_connection_str(conn_str, credential, service): + conn_str = conn_str.rstrip(";") + conn_settings = [s.split("=", 1) for s in conn_str.split(";")] + if any(len(tup) != 2 for tup in conn_settings): + raise ValueError("Connection string is either blank or malformed.") + conn_settings = dict((key.upper(), val) for key, val in conn_settings) + endpoints = _SERVICE_PARAMS[service] + primary = None + secondary = None + if not credential: + try: + credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} + except KeyError: + credential = conn_settings.get("SHAREDACCESSSIGNATURE") + if endpoints["primary"] in conn_settings: + primary = conn_settings[endpoints["primary"]] + if endpoints["secondary"] in conn_settings: + secondary = conn_settings[endpoints["secondary"]] + else: + if endpoints["secondary"] in conn_settings: + raise ValueError("Connection string specifies only secondary endpoint.") + try: + primary = "{}://{}.{}.{}".format( + conn_settings["DEFAULTENDPOINTSPROTOCOL"], + conn_settings["ACCOUNTNAME"], + service, + conn_settings["ENDPOINTSUFFIX"], + ) + secondary = "{}-secondary.{}.{}".format( + conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] + ) + except KeyError: + pass + + if not primary: + try: + primary = "https://{}.{}.{}".format( + conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) + ) + except KeyError: + raise ValueError("Connection string missing required connection details.") + return primary, secondary, credential + + +def create_configuration(**kwargs): + # type: (**Any) -> Configuration + config = Configuration(**kwargs) + config.headers_policy = StorageHeadersPolicy(**kwargs) + config.user_agent_policy = UserAgentPolicy( + sdk_moniker="storage-{}/{}".format(kwargs.pop('storage_sdk'), VERSION), **kwargs) + config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.logging_policy = StorageLoggingPolicy(**kwargs) + config.proxy_policy = ProxyPolicy(**kwargs) + + # Storage settings + config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) + config.copy_polling_interval = 15 + + # Block blob uploads + config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) + config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) + config.use_byte_buffer = kwargs.get("use_byte_buffer", False) + + # Page blob uploads + config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) + + # Datalake file uploads + 
config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) + + # Blob downloads + config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) + config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) + + # File uploads + config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) + return config + + +def parse_query(query_str): + sas_values = QueryStringConstants.to_list() + parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} + sas_params = ["{}={}".format(k, quote(v, safe='')) for k, v in parsed_query.items() if k in sas_values] + sas_token = None + if sas_params: + sas_token = "&".join(sas_params) + + snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") + return snapshot, sas_token + + +def is_credential_sastoken(credential): + if not credential or not isinstance(credential, six.string_types): + return False + + sas_values = QueryStringConstants.to_list() + parsed_query = parse_qs(credential.lstrip("?")) + if parsed_query and all([k in sas_values for k in parsed_query.keys()]): + return True + return False diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client_async.py new file mode 100644 index 0000000..091c350 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/base_client_async.py @@ -0,0 +1,183 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging + +from azure.core.credentials import AzureSasCredential +from azure.core.pipeline import AsyncPipeline +from azure.core.async_paging import AsyncList +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline.policies import ( + ContentDecodePolicy, + AsyncBearerTokenCredentialPolicy, + AsyncRedirectPolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + AzureSasCredentialPolicy, +) +from azure.core.pipeline.transport import AsyncHttpTransport + +from .constants import STORAGE_OAUTH_SCOPE, CONNECTION_TIMEOUT, READ_TIMEOUT +from .authentication import SharedKeyCredentialPolicy +from .base_client import create_configuration +from .policies import ( + StorageContentValidation, + StorageRequestHook, + StorageHosts, + StorageHeadersPolicy, + QueueMessagePolicy +) +from .policies_async import AsyncStorageResponseHook + +from .response_handlers import process_storage_error, PartialBatchErrorException + +if TYPE_CHECKING: + from azure.core.pipeline import Pipeline + from azure.core.pipeline.transport import HttpRequest + from azure.core.configuration import Configuration +_LOGGER = logging.getLogger(__name__) + + +class AsyncStorageAccountHostsMixin(object): + + def __enter__(self): + raise TypeError("Async client only supports 'async with'.") + + def __exit__(self, *args): + pass + + async def __aenter__(self): + await self._client.__aenter__() + return self + + async def __aexit__(self, *args): + await self._client.__aexit__(*args) + + async def close(self): + """ This method is to close the sockets opened by the client. 
+ It need not be used when using with a context manager. + """ + await self._client.close() + + def _create_pipeline(self, credential, **kwargs): + # type: (Any, **Any) -> Tuple[Configuration, Pipeline] + self._credential_policy = None + if hasattr(credential, 'get_token'): + self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE) + elif isinstance(credential, SharedKeyCredentialPolicy): + self._credential_policy = credential + elif isinstance(credential, AzureSasCredential): + self._credential_policy = AzureSasCredentialPolicy(credential) + elif credential is not None: + raise TypeError("Unsupported credential: {}".format(credential)) + config = kwargs.get('_configuration') or create_configuration(**kwargs) + if kwargs.get('_pipeline'): + return config, kwargs['_pipeline'] + config.transport = kwargs.get('transport') # type: ignore + kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) + kwargs.setdefault("read_timeout", READ_TIMEOUT) + if not config.transport: + try: + from azure.core.pipeline.transport import AioHttpTransport + except ImportError: + raise ImportError("Unable to create async transport. Please check aiohttp is installed.") + config.transport = AioHttpTransport(**kwargs) + policies = [ + QueueMessagePolicy(), + config.headers_policy, + config.proxy_policy, + config.user_agent_policy, + StorageContentValidation(), + StorageRequestHook(**kwargs), + self._credential_policy, + ContentDecodePolicy(response_encoding="utf-8"), + AsyncRedirectPolicy(**kwargs), + StorageHosts(hosts=self._hosts, **kwargs), # type: ignore + config.retry_policy, + config.logging_policy, + AsyncStorageResponseHook(**kwargs), + DistributedTracingPolicy(**kwargs), + HttpLoggingPolicy(**kwargs), + ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") + return config, AsyncPipeline(config.transport, policies=policies) + + async def _batch_send( + self, *reqs: 'HttpRequest', + **kwargs + ): + """Given a series of request, do a Storage batch call. + """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + request = self._client._client.post( # pylint: disable=protected-access + url='https://{}/?comp=batch'.format(self.primary_hostname), + headers={ + 'x-ms-version': self.api_version + } + ) + + request.set_multipart_mixed( + *reqs, + policies=[ + StorageHeadersPolicy(), + self._credential_policy + ], + enforce_https=False + ) + + pipeline_response = await self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() # Return an AsyncIterator + if raise_on_any_failure: + parts_list = [] + async for part in parts: + parts_list.append(part) + if any(p for p in parts_list if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts_list + ) + raise error + return AsyncList(parts_list) + return parts + except HttpResponseError as error: + process_storage_error(error) + + +class AsyncTransportWrapper(AsyncHttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. 
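+
+    A minimal inline sketch (editor's addition) of the behaviour this enables:
+
+    .. code-block:: python
+
+        async with service_client:
+            share = service_client.get_share_client("myshare")
+            async with share:
+                pass  # closing the inner client leaves the shared transport open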
+ """ + def __init__(self, async_transport): + self._transport = async_transport + + async def send(self, request, **kwargs): + return await self._transport.send(request, **kwargs) + + async def open(self): + pass + + async def close(self): + pass + + async def __aenter__(self): + pass + + async def __aexit__(self, *args): # pylint: disable=arguments-differ + pass diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/constants.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/constants.py new file mode 100644 index 0000000..66f9a47 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/constants.py @@ -0,0 +1,26 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys +from .._generated import AzureFileStorage + + +X_MS_VERSION = AzureFileStorage(url="get_api_version")._config.version # pylint: disable=protected-access + +# Socket timeout in seconds +CONNECTION_TIMEOUT = 20 +READ_TIMEOUT = 20 + +# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) +# The socket timeout is now the maximum total duration to send all data. +if sys.version_info >= (3, 5): + # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds + # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) + READ_TIMEOUT = 2000 + +STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" + +SERVICE_HOST_BASE = 'core.windows.net' diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/encryption.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/encryption.py new file mode 100644 index 0000000..62607cc --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/encryption.py @@ -0,0 +1,542 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os +from os import urandom +from json import ( + dumps, + loads, +) +from collections import OrderedDict + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import CBC +from cryptography.hazmat.primitives.padding import PKCS7 + +from azure.core.exceptions import HttpResponseError + +from .._version import VERSION +from . import encode_base64, decode_base64_to_bytes + + +_ENCRYPTION_PROTOCOL_V1 = '1.0' +_ERROR_OBJECT_INVALID = \ + '{0} does not define a complete interface. Value of {1} is either missing or invalid.' + + +def _validate_not_none(param_name, param): + if param is None: + raise ValueError('{0} should not be None.'.format(param_name)) + + +def _validate_key_encryption_key_wrap(kek): + # Note that None is not callable and so will fail the second clause of each check. 
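+    # (Editor's note) A conforming key-encryption-key therefore exposes, at minimum:
+    #   wrap_key(key)            -> the wrapped (encrypted) key bytes
+    #   get_kid()                -> a key identifier string
+    #   get_key_wrap_algorithm() -> the name of the wrapping algorithm used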
+ if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) + if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) + + +class _EncryptionAlgorithm(object): + ''' + Specifies which client encryption algorithm is used. + ''' + AES_CBC_256 = 'AES_CBC_256' + + +class _WrappedContentKey: + ''' + Represents the envelope key details stored on the service. + ''' + + def __init__(self, algorithm, encrypted_key, key_id): + ''' + :param str algorithm: + The algorithm used for wrapping. + :param bytes encrypted_key: + The encrypted content-encryption-key. + :param str key_id: + The key-encryption-key identifier string. + ''' + + _validate_not_none('algorithm', algorithm) + _validate_not_none('encrypted_key', encrypted_key) + _validate_not_none('key_id', key_id) + + self.algorithm = algorithm + self.encrypted_key = encrypted_key + self.key_id = key_id + + +class _EncryptionAgent: + ''' + Represents the encryption agent stored on the service. + It consists of the encryption protocol version and encryption algorithm used. + ''' + + def __init__(self, encryption_algorithm, protocol): + ''' + :param _EncryptionAlgorithm encryption_algorithm: + The algorithm used for encrypting the message contents. + :param str protocol: + The protocol version used for encryption. + ''' + + _validate_not_none('encryption_algorithm', encryption_algorithm) + _validate_not_none('protocol', protocol) + + self.encryption_algorithm = str(encryption_algorithm) + self.protocol = protocol + + +class _EncryptionData: + ''' + Represents the encryption data that is stored on the service. + ''' + + def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key, + key_wrapping_metadata): + ''' + :param bytes content_encryption_IV: + The content encryption initialization vector. + :param _EncryptionAgent encryption_agent: + The encryption agent. + :param _WrappedContentKey wrapped_content_key: + An object that stores the wrapping algorithm, the key identifier, + and the encrypted key bytes. + :param dict key_wrapping_metadata: + A dict containing metadata related to the key wrapping. + ''' + + _validate_not_none('content_encryption_IV', content_encryption_IV) + _validate_not_none('encryption_agent', encryption_agent) + _validate_not_none('wrapped_content_key', wrapped_content_key) + + self.content_encryption_IV = content_encryption_IV + self.encryption_agent = encryption_agent + self.wrapped_content_key = wrapped_content_key + self.key_wrapping_metadata = key_wrapping_metadata + + +def _generate_encryption_data_dict(kek, cek, iv): + ''' + Generates and returns the encryption metadata as a dict. + + :param object kek: The key encryption key. See calling functions for more information. + :param bytes cek: The content encryption key. + :param bytes iv: The initialization vector. + :return: A dict containing all the encryption metadata. + :rtype: dict + ''' + # Encrypt the cek. + wrapped_cek = kek.wrap_key(cek) + + # Build the encryption_data dict. + # Use OrderedDict to comply with Java's ordering requirement. 
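+    # (Editor's note) Serialized to JSON, the result looks roughly like:
+    #   {"WrappedContentKey": {"KeyId": ..., "EncryptedKey": ..., "Algorithm": ...},
+    #    "EncryptionAgent": {"Protocol": "1.0", "EncryptionAlgorithm": "AES_CBC_256"},
+    #    "ContentEncryptionIV": ..., "KeyWrappingMetadata": {"EncryptionLibrary": ...}}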
+ wrapped_content_key = OrderedDict() + wrapped_content_key['KeyId'] = kek.get_kid() + wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek) + wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm() + + encryption_agent = OrderedDict() + encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1 + encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256 + + encryption_data_dict = OrderedDict() + encryption_data_dict['WrappedContentKey'] = wrapped_content_key + encryption_data_dict['EncryptionAgent'] = encryption_agent + encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv) + encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION} + + return encryption_data_dict + + +def _dict_to_encryption_data(encryption_data_dict): + ''' + Converts the specified dictionary to an EncryptionData object for + eventual use in decryption. + + :param dict encryption_data_dict: + The dictionary containing the encryption data. + :return: an _EncryptionData object built from the dictionary. + :rtype: _EncryptionData + ''' + try: + if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1: + raise ValueError("Unsupported encryption version.") + except KeyError: + raise ValueError("Unsupported encryption version.") + wrapped_content_key = encryption_data_dict['WrappedContentKey'] + wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], + decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), + wrapped_content_key['KeyId']) + + encryption_agent = encryption_data_dict['EncryptionAgent'] + encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], + encryption_agent['Protocol']) + + if 'KeyWrappingMetadata' in encryption_data_dict: + key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] + else: + key_wrapping_metadata = None + + encryption_data = _EncryptionData(decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']), + encryption_agent, + wrapped_content_key, + key_wrapping_metadata) + + return encryption_data + + +def _generate_AES_CBC_cipher(cek, iv): + ''' + Generates and returns an encryption cipher for AES CBC using the given cek and iv. + + :param bytes[] cek: The content encryption key for the cipher. + :param bytes[] iv: The initialization vector for the cipher. + :return: A cipher for encrypting in AES256 CBC. + :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher + ''' + + backend = default_backend() + algorithm = AES(cek) + mode = CBC(iv) + return Cipher(algorithm, mode, backend) + + +def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): + ''' + Extracts and returns the content_encryption_key stored in the encryption_data object + and performs necessary validation on all parameters. + :param _EncryptionData encryption_data: + The encryption metadata of the retrieved value. + :param obj key_encryption_key: + The key_encryption_key used to unwrap the cek. Please refer to high-level service object + instance variables for more details. + :param func key_resolver: + A function used that, given a key_id, will return a key_encryption_key. Please refer + to high-level service object instance variables for more details. + :return: the content_encryption_key stored in the encryption_data object. 
+    :rtype: bytes[]
+    '''
+
+    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
+    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
+
+    if _ENCRYPTION_PROTOCOL_V1 != encryption_data.encryption_agent.protocol:
+        raise ValueError('Encryption version is not supported.')
+
+    content_encryption_key = None
+
+    # If the resolver exists, give priority to the key it finds.
+    if key_resolver is not None:
+        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
+
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+    if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
+    if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
+        raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
+    # Will throw an exception if the specified algorithm is not supported.
+    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+                                                           encryption_data.wrapped_content_key.algorithm)
+    _validate_not_none('content_encryption_key', content_encryption_key)
+
+    return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+    '''
+    Decrypts the given ciphertext using AES256 in CBC mode with 128 bit padding.
+    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+    Returns the original plaintext.
+
+    :param str message:
+        The ciphertext to be decrypted.
+    :param _EncryptionData encryption_data:
+        The metadata associated with this ciphertext.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted plaintext.
+    :rtype: str
+    '''
+    _validate_not_none('message', message)
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver)
+
+    if _EncryptionAlgorithm.AES_CBC_256 != encryption_data.encryption_agent.encryption_algorithm:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV)
+
+    # decrypt data
+    decrypted_data = message
+    decryptor = cipher.decryptor()
+    decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize())
+
+    # unpad data
+    unpadder = PKCS7(128).unpadder()
+    decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
+
+    return decrypted_data
+
+
+def encrypt_blob(blob, key_encryption_key):
+    '''
+    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encryption metadata. 
+def encrypt_blob(blob, key_encryption_key):
+    '''
+    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encryption metadata. This method should
+    only be used when a blob is small enough for single shot upload. Encrypting larger blobs
+    is done as a part of the upload_data_chunks method.
+
+    :param bytes blob:
+        The blob to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
+    :rtype: (str, bytes)
+    '''
+
+    _validate_not_none('blob', blob)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = urandom(32)
+    initialization_vector = urandom(16)
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(blob) + padder.finalize()
+
+    # Encrypt the data.
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+                                                     initialization_vector)
+    encryption_data['EncryptionMode'] = 'FullBlob'
+
+    return dumps(encryption_data), encrypted_data
+
+
+def generate_blob_encryption_data(key_encryption_key):
+    '''
+    Generates the encryption_metadata for the blob.
+
+    :param object key_encryption_key:
+        The key-encryption-key used to wrap the cek associated with this blob.
+    :return: A tuple containing the cek and iv for this blob as well as the
+        serialized encryption metadata for the blob.
+    :rtype: (bytes, bytes, str)
+    '''
+    encryption_data = None
+    content_encryption_key = None
+    initialization_vector = None
+    if key_encryption_key:
+        _validate_key_encryption_key_wrap(key_encryption_key)
+        content_encryption_key = urandom(32)
+        initialization_vector = urandom(16)
+        encryption_data = _generate_encryption_data_dict(key_encryption_key,
+                                                         content_encryption_key,
+                                                         initialization_vector)
+        encryption_data['EncryptionMode'] = 'FullBlob'
+        encryption_data = dumps(encryption_data)
+
+    return content_encryption_key, initialization_vector, encryption_data
+def decrypt_blob(require_encryption, key_encryption_key, key_resolver,
+                 content, start_offset, end_offset, response_headers):
+    '''
+    Decrypts the given blob contents and returns only the requested range.
+
+    :param bool require_encryption:
+        Whether or not the calling blob service requires objects to be decrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :param key_resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :param bytes content:
+        The encrypted blob content, including any extra bytes fetched to align to AES block boundaries.
+    :param int start_offset:
+        The number of extra bytes at the start of content, before the requested range.
+    :param int end_offset:
+        The number of extra bytes at the end of content, after the requested range.
+    :param dict response_headers:
+        The response headers of the download, containing the encryption metadata.
+    :return: The decrypted blob content.
+    :rtype: bytes
+    '''
+    try:
+        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+    except:  # pylint: disable=bare-except
+        if require_encryption:
+            raise ValueError(
+                'Encryption required, but received data does not contain appropriate metadata. ' + \
+                'Data was either not encrypted or metadata has been lost.')
+
+        return content
+
+    if encryption_data.encryption_agent.encryption_algorithm != _EncryptionAlgorithm.AES_CBC_256:
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    blob_type = response_headers['x-ms-blob-type']
+
+    iv = None
+    unpad = False
+    if 'content-range' in response_headers:
+        content_range = response_headers['content-range']
+        # Format: 'bytes x-y/size'
+
+        # Ignore the word 'bytes'
+        content_range = content_range.split(' ')
+
+        content_range = content_range[1].split('-')
+        content_range = content_range[1].split('/')
+        end_range = int(content_range[0])
+        blob_size = int(content_range[1])
+
+        if start_offset >= 16:
+            iv = content[:16]
+            content = content[16:]
+            start_offset -= 16
+        else:
+            iv = encryption_data.content_encryption_IV
+
+        if end_range == blob_size - 1:
+            unpad = True
+    else:
+        unpad = True
+        iv = encryption_data.content_encryption_IV
+
+    if blob_type == 'PageBlob':
+        unpad = False
+
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+    decryptor = cipher.decryptor()
+
+    content = decryptor.update(content) + decryptor.finalize()
+    if unpad:
+        unpadder = PKCS7(128).unpadder()
+        content = unpadder.update(content) + unpadder.finalize()
+
+    return content[start_offset: len(content) - end_offset]
+
+
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+    encryptor = None
+    padder = None
+
+    if cek is not None and iv is not None:
+        cipher = _generate_AES_CBC_cipher(cek, iv)
+        encryptor = cipher.encryptor()
+        padder = PKCS7(128).padder() if should_pad else None
+
+    return encryptor, padder
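For a whole-blob round trip, ``encrypt_blob`` pairs with ``_dict_to_encryption_data`` and ``_decrypt_message``. A sketch, again assuming the hypothetical ``LocalKeyWrapper`` from above (``loads`` is already imported by this module)::

    kek = LocalKeyWrapper()
    metadata_json, ciphertext = encrypt_blob(b'Hello, encrypted world!', kek)

    encryption_data = _dict_to_encryption_data(loads(metadata_json))
    plaintext = _decrypt_message(ciphertext, encryption_data, key_encryption_key=kek)
    assert plaintext == b'Hello, encrypted world!'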
+def encrypt_queue_message(message, key_encryption_key):
+    '''
+    Encrypts the given plain text message using AES256 in CBC mode with 128 bit padding.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+    :param object message:
+        The plain text message to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :return: A json-formatted string containing the encrypted message and the encryption metadata.
+    :rtype: str
+    '''
+
+    _validate_not_none('message', message)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
+    content_encryption_key = os.urandom(32)
+    initialization_vector = os.urandom(16)
+
+    # Queue encoding functions all return unicode strings, and encryption should
+    # operate on binary strings.
+    message = message.encode('utf-8')
+
+    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+    # PKCS7 with 16 byte blocks ensures compatibility with AES.
+    padder = PKCS7(128).padder()
+    padded_data = padder.update(message) + padder.finalize()
+
+    # Encrypt the data.
+    encryptor = cipher.encryptor()
+    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+    # Build the dictionary structure.
+    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+                                                                      content_encryption_key,
+                                                                      initialization_vector)}
+
+    return dumps(queue_message)
+
+
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+    '''
+    Returns the decrypted message contents from an EncryptedQueueMessage.
+    If no encryption metadata is present, will return the unaltered message.
+    :param str message:
+        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+    :param response:
+        The HTTP response object from the pipeline, used to construct errors on decryption failure.
+    :param bool require_encryption:
+        If set, will enforce that the retrieved messages are encrypted and decrypt them.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The plain text message from the queue message.
+    :rtype: str
+    '''
+
+    try:
+        message = loads(message)
+
+        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+    except (KeyError, ValueError):
+        # Message was not json formatted and so was not encrypted
+        # or the user provided a json formatted message.
+        if require_encryption:
+            raise ValueError('Message was not encrypted.')
+
+        return message
+    try:
+        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+    except Exception as error:
+        raise HttpResponseError(
+            message="Decryption failed.",
+            response=response,
+            error=error)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/models.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/models.py
new file mode 100644
index 0000000..27cd236
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/models.py
@@ -0,0 +1,468 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-instance-attributes + +from enum import Enum + + +def get_enum_value(value): + if value is None or value in ["None", ""]: + return None + try: + return value.value + except AttributeError: + return value + + +class StorageErrorCode(str, Enum): + + # Generic storage values + account_already_exists = "AccountAlreadyExists" + account_being_created = "AccountBeingCreated" + account_is_disabled = "AccountIsDisabled" + authentication_failed = "AuthenticationFailed" + authorization_failure = "AuthorizationFailure" + no_authentication_information = "NoAuthenticationInformation" + condition_headers_not_supported = "ConditionHeadersNotSupported" + condition_not_met = "ConditionNotMet" + empty_metadata_key = "EmptyMetadataKey" + insufficient_account_permissions = "InsufficientAccountPermissions" + internal_error = "InternalError" + invalid_authentication_info = "InvalidAuthenticationInfo" + invalid_header_value = "InvalidHeaderValue" + invalid_http_verb = "InvalidHttpVerb" + invalid_input = "InvalidInput" + invalid_md5 = "InvalidMd5" + invalid_metadata = "InvalidMetadata" + invalid_query_parameter_value = "InvalidQueryParameterValue" + invalid_range = "InvalidRange" + invalid_resource_name = "InvalidResourceName" + invalid_uri = "InvalidUri" + invalid_xml_document = "InvalidXmlDocument" + invalid_xml_node_value = "InvalidXmlNodeValue" + md5_mismatch = "Md5Mismatch" + metadata_too_large = "MetadataTooLarge" + missing_content_length_header = "MissingContentLengthHeader" + missing_required_query_parameter = "MissingRequiredQueryParameter" + missing_required_header = "MissingRequiredHeader" + missing_required_xml_node = "MissingRequiredXmlNode" + multiple_condition_headers_not_supported = "MultipleConditionHeadersNotSupported" + operation_timed_out = "OperationTimedOut" + out_of_range_input = "OutOfRangeInput" + out_of_range_query_parameter_value = "OutOfRangeQueryParameterValue" + request_body_too_large = "RequestBodyTooLarge" + resource_type_mismatch = "ResourceTypeMismatch" + request_url_failed_to_parse = "RequestUrlFailedToParse" + resource_already_exists = "ResourceAlreadyExists" + resource_not_found = "ResourceNotFound" + server_busy = "ServerBusy" + unsupported_header = "UnsupportedHeader" + unsupported_xml_node = "UnsupportedXmlNode" + unsupported_query_parameter = "UnsupportedQueryParameter" + unsupported_http_verb = "UnsupportedHttpVerb" + + # Blob values + append_position_condition_not_met = "AppendPositionConditionNotMet" + blob_already_exists = "BlobAlreadyExists" + blob_not_found = "BlobNotFound" + blob_overwritten = "BlobOverwritten" + blob_tier_inadequate_for_content_length = "BlobTierInadequateForContentLength" + block_count_exceeds_limit = "BlockCountExceedsLimit" + block_list_too_long = "BlockListTooLong" + cannot_change_to_lower_tier = "CannotChangeToLowerTier" + cannot_verify_copy_source = "CannotVerifyCopySource" + container_already_exists = "ContainerAlreadyExists" + container_being_deleted = "ContainerBeingDeleted" + container_disabled = "ContainerDisabled" + container_not_found = "ContainerNotFound" + content_length_larger_than_tier_limit = "ContentLengthLargerThanTierLimit" + copy_across_accounts_not_supported = "CopyAcrossAccountsNotSupported" + copy_id_mismatch = "CopyIdMismatch" + feature_version_mismatch = "FeatureVersionMismatch" + incremental_copy_blob_mismatch = "IncrementalCopyBlobMismatch" + incremental_copy_of_eralier_version_snapshot_not_allowed = 
"IncrementalCopyOfEralierVersionSnapshotNotAllowed" + incremental_copy_source_must_be_snapshot = "IncrementalCopySourceMustBeSnapshot" + infinite_lease_duration_required = "InfiniteLeaseDurationRequired" + invalid_blob_or_block = "InvalidBlobOrBlock" + invalid_blob_tier = "InvalidBlobTier" + invalid_blob_type = "InvalidBlobType" + invalid_block_id = "InvalidBlockId" + invalid_block_list = "InvalidBlockList" + invalid_operation = "InvalidOperation" + invalid_page_range = "InvalidPageRange" + invalid_source_blob_type = "InvalidSourceBlobType" + invalid_source_blob_url = "InvalidSourceBlobUrl" + invalid_version_for_page_blob_operation = "InvalidVersionForPageBlobOperation" + lease_already_present = "LeaseAlreadyPresent" + lease_already_broken = "LeaseAlreadyBroken" + lease_id_mismatch_with_blob_operation = "LeaseIdMismatchWithBlobOperation" + lease_id_mismatch_with_container_operation = "LeaseIdMismatchWithContainerOperation" + lease_id_mismatch_with_lease_operation = "LeaseIdMismatchWithLeaseOperation" + lease_id_missing = "LeaseIdMissing" + lease_is_breaking_and_cannot_be_acquired = "LeaseIsBreakingAndCannotBeAcquired" + lease_is_breaking_and_cannot_be_changed = "LeaseIsBreakingAndCannotBeChanged" + lease_is_broken_and_cannot_be_renewed = "LeaseIsBrokenAndCannotBeRenewed" + lease_lost = "LeaseLost" + lease_not_present_with_blob_operation = "LeaseNotPresentWithBlobOperation" + lease_not_present_with_container_operation = "LeaseNotPresentWithContainerOperation" + lease_not_present_with_lease_operation = "LeaseNotPresentWithLeaseOperation" + max_blob_size_condition_not_met = "MaxBlobSizeConditionNotMet" + no_pending_copy_operation = "NoPendingCopyOperation" + operation_not_allowed_on_incremental_copy_blob = "OperationNotAllowedOnIncrementalCopyBlob" + pending_copy_operation = "PendingCopyOperation" + previous_snapshot_cannot_be_newer = "PreviousSnapshotCannotBeNewer" + previous_snapshot_not_found = "PreviousSnapshotNotFound" + previous_snapshot_operation_not_supported = "PreviousSnapshotOperationNotSupported" + sequence_number_condition_not_met = "SequenceNumberConditionNotMet" + sequence_number_increment_too_large = "SequenceNumberIncrementTooLarge" + snapshot_count_exceeded = "SnapshotCountExceeded" + snaphot_operation_rate_exceeded = "SnaphotOperationRateExceeded" + snapshots_present = "SnapshotsPresent" + source_condition_not_met = "SourceConditionNotMet" + system_in_use = "SystemInUse" + target_condition_not_met = "TargetConditionNotMet" + unauthorized_blob_overwrite = "UnauthorizedBlobOverwrite" + blob_being_rehydrated = "BlobBeingRehydrated" + blob_archived = "BlobArchived" + blob_not_archived = "BlobNotArchived" + + # Queue values + invalid_marker = "InvalidMarker" + message_not_found = "MessageNotFound" + message_too_large = "MessageTooLarge" + pop_receipt_mismatch = "PopReceiptMismatch" + queue_already_exists = "QueueAlreadyExists" + queue_being_deleted = "QueueBeingDeleted" + queue_disabled = "QueueDisabled" + queue_not_empty = "QueueNotEmpty" + queue_not_found = "QueueNotFound" + + # File values + cannot_delete_file_or_directory = "CannotDeleteFileOrDirectory" + client_cache_flush_delay = "ClientCacheFlushDelay" + delete_pending = "DeletePending" + directory_not_empty = "DirectoryNotEmpty" + file_lock_conflict = "FileLockConflict" + invalid_file_or_directory_path_name = "InvalidFileOrDirectoryPathName" + parent_not_found = "ParentNotFound" + read_only_attribute = "ReadOnlyAttribute" + share_already_exists = "ShareAlreadyExists" + share_being_deleted = "ShareBeingDeleted" + 
share_disabled = "ShareDisabled" + share_not_found = "ShareNotFound" + sharing_violation = "SharingViolation" + share_snapshot_in_progress = "ShareSnapshotInProgress" + share_snapshot_count_exceeded = "ShareSnapshotCountExceeded" + share_snapshot_operation_not_supported = "ShareSnapshotOperationNotSupported" + share_has_snapshots = "ShareHasSnapshots" + container_quota_downgrade_not_allowed = "ContainerQuotaDowngradeNotAllowed" + + # DataLake values + content_length_must_be_zero = 'ContentLengthMustBeZero' + path_already_exists = 'PathAlreadyExists' + invalid_flush_position = 'InvalidFlushPosition' + invalid_property_name = 'InvalidPropertyName' + invalid_source_uri = 'InvalidSourceUri' + unsupported_rest_version = 'UnsupportedRestVersion' + file_system_not_found = 'FilesystemNotFound' + path_not_found = 'PathNotFound' + rename_destination_parent_path_not_found = 'RenameDestinationParentPathNotFound' + source_path_not_found = 'SourcePathNotFound' + destination_path_is_being_deleted = 'DestinationPathIsBeingDeleted' + file_system_already_exists = 'FilesystemAlreadyExists' + file_system_being_deleted = 'FilesystemBeingDeleted' + invalid_destination_path = 'InvalidDestinationPath' + invalid_rename_source_path = 'InvalidRenameSourcePath' + invalid_source_or_destination_resource_type = 'InvalidSourceOrDestinationResourceType' + lease_is_already_broken = 'LeaseIsAlreadyBroken' + lease_name_mismatch = 'LeaseNameMismatch' + path_conflict = 'PathConflict' + source_path_is_being_deleted = 'SourcePathIsBeingDeleted' + + +class DictMixin(object): + + def __setitem__(self, key, item): + self.__dict__[key] = item + + def __getitem__(self, key): + return self.__dict__[key] + + def __repr__(self): + return str(self) + + def __len__(self): + return len(self.keys()) + + def __delitem__(self, key): + self.__dict__[key] = None + + def __eq__(self, other): + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self): + return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) + + def has_key(self, k): + return k in self.__dict__ + + def update(self, *args, **kwargs): + return self.__dict__.update(*args, **kwargs) + + def keys(self): + return [k for k in self.__dict__ if not k.startswith('_')] + + def values(self): + return [v for k, v in self.__dict__.items() if not k.startswith('_')] + + def items(self): + return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] + + def get(self, key, default=None): + if key in self.__dict__: + return self.__dict__[key] + return default + + +class LocationMode(object): + """ + Specifies the location the request should be sent to. This mode only applies + for RA-GRS accounts which allow secondary read access. All other account types + must use PRIMARY. + """ + + PRIMARY = 'primary' #: Requests should be sent to the primary location. + SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. + + +class ResourceTypes(object): + """ + Specifies the resource types that are accessible with the account SAS. 
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                     ('c' if self.container else '') +
+                     ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service and container,
+        you would provide a string "sc".
+
+        :param str string: Specify service, container, or object in
+            the string with the first letter of the word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.fileshare.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
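The permission strings round-trip through ``str()`` and ``from_string``; for example::

    rt = ResourceTypes(service=True, object=True)
    assert str(rt) == 'so'
    assert ResourceTypes.from_string('sco').container is True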
+
+
+class AccountSasPermissions(object):
+    """
+    :class:`~AccountSasPermissions` class to be used with generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    perms found here.
+
+    :param bool read:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits read permissions to the specified resource type.
+    :param bool write:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits write permissions to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+    :param bool delete_previous_version:
+        Delete the previous blob version for the versioning enabled storage account.
+    :param bool list:
+        Valid for Service and Container resource types only.
+    :param bool add:
+        Valid for the following Object resource types only: queue messages, and append blobs.
+    :param bool create:
+        Valid for the following Object resource types only: blobs and files.
+        Users can create new blobs or files, but may not overwrite existing
+        blobs or files.
+    :param bool update:
+        Valid for the following Object resource types only: queue messages.
+    :param bool process:
+        Valid for the following Object resource type only: queue messages.
+    :keyword bool tag:
+        To enable set or get tags on the blobs in the container.
+    :keyword bool filter_by_tags:
+        To enable get blobs by tags, this should be used together with list permission.
+    """
+    def __init__(self, read=False, write=False, delete=False,
+                 list=False,  # pylint: disable=redefined-builtin
+                 add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs):
+        self.read = read
+        self.write = write
+        self.delete = delete
+        self.delete_previous_version = delete_previous_version
+        self.list = list
+        self.add = add
+        self.create = create
+        self.update = update
+        self.process = process
+        self.tag = kwargs.pop('tag', False)
+        self.filter_by_tags = kwargs.pop('filter_by_tags', False)
+        self._str = (('r' if self.read else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('x' if self.delete_previous_version else '') +
+                     ('l' if self.list else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('u' if self.update else '') +
+                     ('p' if self.process else '') +
+                     ('f' if self.filter_by_tags else '') +
+                     ('t' if self.tag else '')
+                     )
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create AccountSasPermissions from a string.
+
+        To specify read, write, delete, etc. permissions you need only to
+        include the first letter of the word in the string. E.g. for read and write
+        permissions you would provide a string "rw".
+
+        :param str permission: Specify permissions in
+            the string with the first letter of the word.
+        :return: An AccountSasPermissions object
+        :rtype: ~azure.storage.fileshare.AccountSasPermissions
+        """
+        p_read = 'r' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_delete_previous_version = 'x' in permission
+        p_list = 'l' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_update = 'u' in permission
+        p_process = 'p' in permission
+        p_tag = 't' in permission
+        p_filter_by_tags = 'f' in permission
+        parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version,
+                     list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag,
+                     filter_by_tags=p_filter_by_tags)
+
+        return parsed
+
+
+class Services(object):
+    """Specifies the services accessible with the account SAS.
+
+    :param bool blob:
+        Access for the `~azure.storage.blob.BlobServiceClient`
+    :param bool queue:
+        Access for the `~azure.storage.queue.QueueServiceClient`
+    :param bool fileshare:
+        Access for the `~azure.storage.fileshare.ShareServiceClient`
+    """
+
+    def __init__(self, blob=False, queue=False, fileshare=False):
+        self.blob = blob
+        self.queue = queue
+        self.fileshare = fileshare
+        self._str = (('b' if self.blob else '') +
+                     ('q' if self.queue else '') +
+                     ('f' if self.fileshare else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create Services from a string.
+
+        To specify blob, queue, or file you need only to
+        include the first letter of the word in the string. E.g. for blob and queue
+        you would provide a string "bq".
+
+        :param str string: Specify blob, queue, or file in
+            the string with the first letter of the word.
+        :return: A Services object
+        :rtype: ~azure.storage.fileshare.Services
+        """
+        res_blob = 'b' in string
+        res_queue = 'q' in string
+        res_file = 'f' in string
+
+        parsed = cls(res_blob, res_queue, res_file)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
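``AccountSasPermissions`` and ``Services`` follow the same string convention; a quick check::

    perms = AccountSasPermissions.from_string('rwdl')
    assert str(perms) == 'rwdl'
    assert perms.read and not perms.process

    assert str(Services(blob=True, fileshare=True)) == 'bf'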
+
+
+class UserDelegationKey(object):
+    """
+    Represents a user delegation key, provided to the user by Azure Storage
+    based on their Azure Active Directory access token.
+
+    The fields are saved as simple strings since the user does not have to interact with this object;
+    to generate an identity SAS, the user can simply pass it to the right API.
+
+    :ivar str signed_oid:
+        Object ID of this token.
+    :ivar str signed_tid:
+        Tenant ID of the tenant that issued this token.
+    :ivar str signed_start:
+        The datetime this token becomes valid.
+    :ivar str signed_expiry:
+        The datetime this token expires.
+    :ivar str signed_service:
+        What service this key is valid for.
+    :ivar str signed_version:
+        The version identifier of the REST service that created this token.
+    :ivar str value:
+        The user delegation key.
+    """
+    def __init__(self):
+        self.signed_oid = None
+        self.signed_tid = None
+        self.signed_start = None
+        self.signed_expiry = None
+        self.signed_service = None
+        self.signed_version = None
+        self.value = None
diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/parser.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/parser.py
new file mode 100644
index 0000000..c6feba8
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/parser.py
@@ -0,0 +1,20 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import sys
+
+if sys.version_info < (3,):
+    def _str(value):
+        if isinstance(value, unicode):  # pylint: disable=undefined-variable
+            return value.encode('utf-8')
+
+        return str(value)
+else:
+    _str = str
+
+
+def _to_utc_datetime(value):
+    return value.strftime('%Y-%m-%dT%H:%M:%SZ')
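``_to_utc_datetime`` emits the compact ISO-8601 form the service expects; note it assumes the input datetime is already in UTC::

    from datetime import datetime
    assert _to_utc_datetime(datetime(2021, 5, 1, 12, 30, 0)) == '2021-05-01T12:30:00Z'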
diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies.py
new file mode 100644
index 0000000..11fc984
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies.py
@@ -0,0 +1,608 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import base64
+import hashlib
+import re
+import random
+from time import time
+from io import SEEK_SET, UnsupportedOperation
+import logging
+import uuid
+import types
+from typing import Any, TYPE_CHECKING
+from wsgiref.handlers import format_date_time
+try:
+    from urllib.parse import (
+        urlparse,
+        parse_qsl,
+        urlunparse,
+        urlencode,
+    )
+except ImportError:
+    from urllib import urlencode  # type: ignore
+    from urlparse import (  # type: ignore
+        urlparse,
+        parse_qsl,
+        urlunparse,
+    )
+
+from azure.core.pipeline.policies import (
+    HeadersPolicy,
+    SansIOHTTPPolicy,
+    NetworkTraceLoggingPolicy,
+    HTTPPolicy,
+    RequestHistory
+)
+from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError
+
+from .models import LocationMode
+
+try:
+    _unicode_type = unicode  # type: ignore
+except NameError:
+    _unicode_type = str
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import PipelineRequest, PipelineResponse
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def encode_base64(data):
+    if isinstance(data, _unicode_type):
+        data = data.encode('utf-8')
+    encoded = base64.b64encode(data)
+    return encoded.decode('utf-8')
+
+
+def is_exhausted(settings):
+    """Are we out of retries?"""
+    retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status'])
+    retry_counts = list(filter(None, retry_counts))
+    if not retry_counts:
+        return False
+    return min(retry_counts) < 0
+
+
+def retry_hook(settings, **kwargs):
+    if settings['hook']:
+        settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs)
+
+
+def is_retry(response, mode):
+    """Is this method/status code retryable? (Based on whitelists and control
+    variables such as the number of total retries to allow, whether to
+    respect the Retry-After header, whether this header is present, and
+    whether the returned status code is on the list of status codes to
+    be retried upon in the presence of the aforementioned header)
+    """
+    status = response.http_response.status_code
+    if 300 <= status < 500:
+        # An exception occurred, but in most cases it was expected. Examples could
+        # include a 409 Conflict or 412 Precondition Failed.
+        if status == 404 and mode == LocationMode.SECONDARY:
+            # Response code 404 should be retried if secondary was used.
+            return True
+        if status == 408:
+            # Response code 408 is a timeout and should be retried.
+            return True
+        return False
+    if status >= 500:
+        # Response codes above 500 with the exception of 501 Not Implemented and
+        # 505 Version Not Supported indicate a server issue and should be retried.
+ if status in [501, 505]: + return False + return True + return False + + +def urljoin(base_url, stub_url): + parsed = urlparse(base_url) + parsed = parsed._replace(path=parsed.path + '/' + stub_url) + return parsed.geturl() + + +class QueueMessagePolicy(SansIOHTTPPolicy): + + def on_request(self, request): + message_id = request.context.options.pop('queue_message_id', None) + if message_id: + request.http_request.url = urljoin( + request.http_request.url, + message_id) + + +class StorageHeadersPolicy(HeadersPolicy): + request_id_header_name = 'x-ms-client-request-id' + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + super(StorageHeadersPolicy, self).on_request(request) + current_time = format_date_time(time()) + request.http_request.headers['x-ms-date'] = current_time + + custom_id = request.context.options.pop('client_request_id', None) + request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) + + # def on_response(self, request, response): + # # raise exception if the echoed client request id from the service is not identical to the one we sent + # if self.request_id_header_name in response.http_response.headers: + + # client_request_id = request.http_request.headers.get(self.request_id_header_name) + + # if response.http_response.headers[self.request_id_header_name] != client_request_id: + # raise AzureError( + # "Echoed client request ID: {} does not match sent client request ID: {}. " + # "Service request ID: {}".format( + # response.http_response.headers[self.request_id_header_name], client_request_id, + # response.http_response.headers['x-ms-request-id']), + # response=response.http_response + # ) + + +class StorageHosts(SansIOHTTPPolicy): + + def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument + self.hosts = hosts + super(StorageHosts, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + request.context.options['hosts'] = self.hosts + parsed_url = urlparse(request.http_request.url) + + # Detect what location mode we're currently requesting with + location_mode = LocationMode.PRIMARY + for key, value in self.hosts.items(): + if parsed_url.netloc == value: + location_mode = key + + # See if a specific location mode has been specified, and if so, redirect + use_location = request.context.options.pop('use_location', None) + if use_location: + # Lock retries to the specific location + request.context.options['retry_to_secondary'] = False + if use_location not in self.hosts: + raise ValueError("Attempting to use undefined host location {}".format(use_location)) + if use_location != location_mode: + # Update request URL to use the specified location + updated = parsed_url._replace(netloc=self.hosts[use_location]) + request.http_request.url = updated.geturl() + location_mode = use_location + + request.context.options['location_mode'] = location_mode + + +class StorageLoggingPolicy(NetworkTraceLoggingPolicy): + """A policy that logs HTTP request and response to the DEBUG logger. 
+ + This accepts both global configuration, and per-request level with "enable_http_logger" + """ + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + http_request = request.http_request + options = request.context.options + if options.pop("logging_enable", self.enable_http_logger): + request.context["logging_enable"] = True + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + log_url = http_request.url + query_params = http_request.query + if 'sig' in query_params: + log_url = log_url.replace(query_params['sig'], "sig=*****") + _LOGGER.debug("Request URL: %r", log_url) + _LOGGER.debug("Request method: %r", http_request.method) + _LOGGER.debug("Request headers:") + for header, value in http_request.headers.items(): + if header.lower() == 'authorization': + value = '*****' + elif header.lower() == 'x-ms-copy-source' and 'sig' in value: + # take the url apart and scrub away the signed signature + scheme, netloc, path, params, query, fragment = urlparse(value) + parsed_qs = dict(parse_qsl(query)) + parsed_qs['sig'] = '*****' + + # the SAS needs to be put back together + value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) + + _LOGGER.debug(" %r: %r", header, value) + _LOGGER.debug("Request body:") + + # We don't want to log the binary data of a file upload. + if isinstance(http_request.body, types.GeneratorType): + _LOGGER.debug("File upload") + else: + _LOGGER.debug(str(http_request.body)) + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log request: %r", err) + + def on_response(self, request, response): + # type: (PipelineRequest, PipelineResponse, Any) -> None + if response.context.pop("logging_enable", self.enable_http_logger): + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + _LOGGER.debug("Response status: %r", response.http_response.status_code) + _LOGGER.debug("Response headers:") + for res_header, value in response.http_response.headers.items(): + _LOGGER.debug(" %r: %r", res_header, value) + + # We don't want to log binary data if the response is a file. 
+ _LOGGER.debug("Response content:") + pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) + header = response.http_response.headers.get('content-disposition') + + if header and pattern.match(header): + filename = header.partition('=')[2] + _LOGGER.debug("File attachments: %s", filename) + elif response.http_response.headers.get("content-type", "").endswith("octet-stream"): + _LOGGER.debug("Body contains binary data.") + elif response.http_response.headers.get("content-type", "").startswith("image"): + _LOGGER.debug("Body contains image data.") + else: + if response.context.options.get('stream', False): + _LOGGER.debug("Body is streamable") + else: + _LOGGER.debug(response.http_response.text()) + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log response: %s", repr(err)) + + +class StorageRequestHook(SansIOHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._request_callback = kwargs.get('raw_request_hook') + super(StorageRequestHook, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, **Any) -> PipelineResponse + request_callback = request.context.options.pop('raw_request_hook', self._request_callback) + if request_callback: + request_callback(request) + + +class StorageResponseHook(HTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(StorageResponseHook, self).__init__() + + def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = self.next.send(request) + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + response_callback(response) + request.context['response_callback'] = response_callback + return response + + +class StorageContentValidation(SansIOHTTPPolicy): + """A simple policy that sends the given headers + with the request. + + This will overwrite any headers already defined in the request. 
+ """ + header_name = 'Content-MD5' + + def __init__(self, **kwargs): # pylint: disable=unused-argument + super(StorageContentValidation, self).__init__() + + @staticmethod + def get_content_md5(data): + md5 = hashlib.md5() # nosec + if isinstance(data, bytes): + md5.update(data) + elif hasattr(data, 'read'): + pos = 0 + try: + pos = data.tell() + except: # pylint: disable=bare-except + pass + for chunk in iter(lambda: data.read(4096), b""): + md5.update(chunk) + try: + data.seek(pos, SEEK_SET) + except (AttributeError, IOError): + raise ValueError("Data should be bytes or a seekable file-like object.") + else: + raise ValueError("Data should be bytes or a seekable file-like object.") + + return md5.digest() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + validate_content = request.context.options.pop('validate_content', False) + if validate_content and request.http_request.method != 'GET': + computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) + request.http_request.headers[self.header_name] = computed_md5 + request.context['validate_content_md5'] = computed_md5 + request.context['validate_content'] = validate_content + + def on_response(self, request, response): + if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): + computed_md5 = request.context.get('validate_content_md5') or \ + encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) + if response.http_response.headers['content-md5'] != computed_md5: + raise AzureError( + 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.'.format( + response.http_response.headers['content-md5'], computed_md5), + response=response.http_response + ) + + +class StorageRetryPolicy(HTTPPolicy): + """ + The base class for Exponential and Linear retries containing shared code. + """ + + def __init__(self, **kwargs): + self.total_retries = kwargs.pop('retry_total', 10) + self.connect_retries = kwargs.pop('retry_connect', 3) + self.read_retries = kwargs.pop('retry_read', 3) + self.status_retries = kwargs.pop('retry_status', 3) + self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) + super(StorageRetryPolicy, self).__init__() + + def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use + """ + A function which sets the next host location on the request, if applicable. + + :param ~azure.storage.models.RetryContext context: + The retry context containing the previous host location and the request + to evaluate and possibly modify. 
+ """ + if settings['hosts'] and all(settings['hosts'].values()): + url = urlparse(request.url) + # If there's more than one possible location, retry to the alternative + if settings['mode'] == LocationMode.PRIMARY: + settings['mode'] = LocationMode.SECONDARY + else: + settings['mode'] = LocationMode.PRIMARY + updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) + request.url = updated.geturl() + + def configure_retries(self, request): # pylint: disable=no-self-use + body_position = None + if hasattr(request.http_request.body, 'read'): + try: + body_position = request.http_request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + options = request.context.options + return { + 'total': options.pop("retry_total", self.total_retries), + 'connect': options.pop("retry_connect", self.connect_retries), + 'read': options.pop("retry_read", self.read_retries), + 'status': options.pop("retry_status", self.status_retries), + 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), + 'mode': options.pop("location_mode", LocationMode.PRIMARY), + 'hosts': options.pop("hosts", None), + 'hook': options.pop("retry_hook", None), + 'body_position': body_position, + 'count': 0, + 'history': [] + } + + def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use + """ Formula for computing the current backoff. + Should be calculated by child class. + + :rtype: float + """ + return 0 + + def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + + def increment(self, settings, request, response=None, error=None): + """Increment the retry counters. + + :param response: A pipeline response object. + :param error: An error encountered during the request, or + None if the response was received successfully. + + :return: Whether the retry attempts are exhausted. + """ + settings['total'] -= 1 + + if error and isinstance(error, ServiceRequestError): + # Errors when we're fairly sure that the server did not receive the + # request, so it should be safe to retry. + settings['connect'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + elif error and isinstance(error, ServiceResponseError): + # Errors that occur after the request has been started, so we should + # assume that the server began processing it. 
+            settings['read'] -= 1
+            settings['history'].append(RequestHistory(request, error=error))
+
+        else:
+            # Incrementing because of a server error like a 500 in
+            # status_forcelist and the given method is in the whitelist
+            if response:
+                settings['status'] -= 1
+                settings['history'].append(RequestHistory(request, http_response=response))
+
+        if not is_exhausted(settings):
+            if request.method not in ['PUT'] and settings['retry_secondary']:
+                self._set_next_host_location(settings, request)
+
+            # rewind the request body if it is a stream
+            if request.body and hasattr(request.body, 'read'):
+                # if no position was saved, then retry would not work
+                if settings['body_position'] is None:
+                    return False
+                try:
+                    # attempt to rewind the body to the initial position
+                    request.body.seek(settings['body_position'], SEEK_SET)
+                except (UnsupportedOperation, ValueError):
+                    # if body is not seekable, then retry would not work
+                    return False
+            settings['count'] += 1
+            return True
+        return False
+
+    def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
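``configure_retries`` above pops a per-operation ``retry_hook`` option, so callers can observe each retry attempt. A hypothetical hook, assuming the client forwards per-operation keyword options into the pipeline as the storage clients do::

    def log_retry(retry_count=None, location_mode=None, **kwargs):
        # Called before each backoff sleep with the attempt number and the
        # location (primary/secondary) that served the failed attempt.
        print('retry {0} via {1}'.format(retry_count, location_mode))

    # e.g. share_client.get_share_properties(retry_hook=log_retry)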
+
+
+class ExponentialRetry(StorageRetryPolicy):
+    """Exponential retry."""
+
+    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_power^retry_count seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        '''
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies_async.py
new file mode 100644
index 0000000..e0926b8
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/policies_async.py
@@ -0,0 +1,220 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import asyncio +import random +import logging +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline.policies import AsyncHTTPPolicy +from azure.core.exceptions import AzureError + +from .policies import is_retry, StorageRetryPolicy + +if TYPE_CHECKING: + from azure.core.pipeline import PipelineRequest, PipelineResponse + + +_LOGGER = logging.getLogger(__name__) + + +async def retry_hook(settings, **kwargs): + if settings['hook']: + if asyncio.iscoroutine(settings['hook']): + await settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + else: + settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + + +class AsyncStorageResponseHook(AsyncHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(AsyncStorageResponseHook, self).__init__() + + async def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + data_stream_total = request.context.get('data_stream_total') or \ + request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') or \ + request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') or \ + request.context.options.pop('upload_stream_current', None) + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = await self.next.send(request) + await response.http_response.load_body() + + will_retry = is_retry(response, request.context.options.get('mode')) + if not will_retry and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif not will_retry and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + if asyncio.iscoroutine(response_callback): + await response_callback(response) + else: + response_callback(response) + request.context['response_callback'] = response_callback + return response + +class AsyncStorageRetryPolicy(StorageRetryPolicy): + """ + The base class for Exponential and Linear retries containing shared code. 
+    """
+
+    async def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        await transport.sleep(backoff)
+
+    async def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = await self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        await retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        await self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    await retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    await self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
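The exponential policy defined just below produces pre-jitter waits of 15, 18 and 24 seconds for the first three retries; with jitter disabled this is easy to verify::

    policy = ExponentialRetry(random_jitter_range=0)
    assert [policy.get_backoff_time({'count': n}) for n in (0, 1, 2)] == [15, 18, 24]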
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in the back-off interval x varying between x+3 and x-3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            A number of seconds indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: float or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/request_handlers.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/request_handlers.py
new file mode 100644
index 0000000..37354d7
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/request_handlers.py
@@ -0,0 +1,273 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+
+import logging
+from os import fstat
+from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
+
+import isodate
+
+from azure.core.exceptions import raise_with_traceback
+
+
+_LOGGER = logging.getLogger(__name__)
+
+_REQUEST_DELIMITER_PREFIX = "batch_"
+_HTTP1_1_IDENTIFIER = "HTTP/1.1"
+_HTTP_LINE_ENDING = "\r\n"
+
+
+def serialize_iso(attr):
+    """Serialize Datetime object into ISO-8601 formatted string.
+
+    :param Datetime attr: Object to be serialized.
+    :rtype: str
+    :raises: ValueError if format invalid.
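+
+    Illustrative example (a timezone-naive value is treated as UTC)::
+
+        >>> from datetime import datetime
+        >>> serialize_iso(datetime(2021, 5, 4, 12, 30, 0))
+        '2021-05-04T12:30:00Z'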
+ """ + if not attr: + return None + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, + utc.tm_hour, utc.tm_min, utc.tm_sec) + return date + 'Z' + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(ValueError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + +def get_length(data): + length = None + # Check if object implements the __len__ method, covers most input cases such as bytearray. + try: + length = len(data) + except: # pylint: disable=bare-except + pass + + if not length: + # Check if the stream is a file-like stream object. + # If so, calculate the size using the file descriptor. + try: + fileno = data.fileno() + except (AttributeError, UnsupportedOperation): + pass + else: + try: + return fstat(fileno).st_size + except OSError: + # Not a valid fileno, may be possible requests returned + # a socket number? + pass + + # If the stream is seekable and tell() is implemented, calculate the stream size. + try: + current_position = data.tell() + data.seek(0, SEEK_END) + length = data.tell() - current_position + data.seek(current_position, SEEK_SET) + except (AttributeError, UnsupportedOperation): + pass + + return length + + +def read_length(data): + try: + if hasattr(data, 'read'): + read_data = b'' + for chunk in iter(lambda: data.read(4096), b""): + read_data += chunk + return len(read_data), read_data + if hasattr(data, '__iter__'): + read_data = b'' + for chunk in data: + read_data += chunk + return len(read_data), read_data + except: # pylint: disable=bare-except + pass + raise ValueError("Unable to calculate content length, please specify.") + + +def validate_and_format_range_headers( + start_range, end_range, start_range_required=True, + end_range_required=True, check_content_md5=False, align_to_page=False): + # If end range is provided, start range must be provided + if (start_range_required or end_range is not None) and start_range is None: + raise ValueError("start_range value cannot be None.") + if end_range_required and end_range is None: + raise ValueError("end_range value cannot be None.") + + # Page ranges must be 512 aligned + if align_to_page: + if start_range is not None and start_range % 512 != 0: + raise ValueError("Invalid page blob start_range: {0}. " + "The size must be aligned to a 512-byte boundary.".format(start_range)) + if end_range is not None and end_range % 512 != 511: + raise ValueError("Invalid page blob end_range: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(end_range)) + + # Format based on whether end_range is present + range_header = None + if end_range is not None: + range_header = 'bytes={0}-{1}'.format(start_range, end_range) + elif start_range is not None: + range_header = "bytes={0}-".format(start_range) + + # Content MD5 can only be provided for a complete range less than 4MB in size + range_validation = None + if check_content_md5: + if start_range is None or end_range is None: + raise ValueError("Both start and end range requied for MD5 content validation.") + if end_range - start_range > 4 * 1024 * 1024: + raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") + range_validation = 'true' + + return range_header, range_validation + + +def add_metadata_headers(metadata=None): + # type: (Optional[Dict[str, str]]) -> Dict[str, str] + headers = {} + if metadata: + for key, value in metadata.items(): + headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value + return headers + + +def serialize_batch_body(requests, batch_id): + """ + -- + + -- + (repeated as needed) + ---- + + Serializes the requests in this batch to a single HTTP mixed/multipart body. + + :param list[~azure.core.pipeline.transport.HttpRequest] requests: + a list of sub-request for the batch request + :param str batch_id: + to be embedded in batch sub-request delimiter + :return: The body bytes for this batch. + """ + + if requests is None or len(requests) == 0: + raise ValueError('Please provide sub-request(s) for this batch request') + + delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') + newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') + batch_body = list() + + content_index = 0 + for request in requests: + request.headers.update({ + "Content-ID": str(content_index), + "Content-Length": str(0) + }) + batch_body.append(delimiter_bytes) + batch_body.append(_make_body_from_sub_request(request)) + batch_body.append(newline_bytes) + content_index += 1 + + batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) + # final line of body MUST have \r\n at the end, or it will not be properly read by the service + batch_body.append(newline_bytes) + + return bytes().join(batch_body) + + +def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): + """ + Gets the delimiter used for this batch request's mixed/multipart HTTP format. + + :param str batch_id: + Randomly generated id + :param bool is_prepend_dashes: + Whether to include the starting dashes. Used in the body, but non on defining the delimiter. + :param bool is_append_dashes: + Whether to include the ending dashes. Used in the body on the closing delimiter only. + :return: The delimiter, WITHOUT a trailing newline. + """ + + prepend_dashes = '--' if is_prepend_dashes else '' + append_dashes = '--' if is_append_dashes else '' + + return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes + + +def _make_body_from_sub_request(sub_request): + """ + Content-Type: application/http + Content-ID: + Content-Transfer-Encoding: (if present) + + HTTP/ +
:
(repeated as necessary) + Content-Length: + (newline if content length > 0) + (if content length > 0) + + Serializes an http request. + + :param ~azure.core.pipeline.transport.HttpRequest sub_request: + Request to serialize. + :return: The serialized sub-request in bytes + """ + + # put the sub-request's headers into a list for efficient str concatenation + sub_request_body = list() + + # get headers for ease of manipulation; remove headers as they are used + headers = sub_request.headers + + # append opening headers + sub_request_body.append("Content-Type: application/http") + sub_request_body.append(_HTTP_LINE_ENDING) + + sub_request_body.append("Content-ID: ") + sub_request_body.append(headers.pop("Content-ID", "")) + sub_request_body.append(_HTTP_LINE_ENDING) + + sub_request_body.append("Content-Transfer-Encoding: binary") + sub_request_body.append(_HTTP_LINE_ENDING) + + # append blank line + sub_request_body.append(_HTTP_LINE_ENDING) + + # append HTTP verb and path and query and HTTP version + sub_request_body.append(sub_request.method) + sub_request_body.append(' ') + sub_request_body.append(sub_request.url) + sub_request_body.append(' ') + sub_request_body.append(_HTTP1_1_IDENTIFIER) + sub_request_body.append(_HTTP_LINE_ENDING) + + # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) + for header_name, header_value in headers.items(): + if header_value is not None: + sub_request_body.append(header_name) + sub_request_body.append(": ") + sub_request_body.append(header_value) + sub_request_body.append(_HTTP_LINE_ENDING) + + # append blank line + sub_request_body.append(_HTTP_LINE_ENDING) + + return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/response_handlers.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/response_handlers.py new file mode 100644 index 0000000..1863949 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/response_handlers.py @@ -0,0 +1,192 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging +from xml.etree.ElementTree import Element + +from azure.core.pipeline.policies import ContentDecodePolicy +from azure.core.exceptions import ( + HttpResponseError, + ResourceNotFoundError, + ResourceModifiedError, + ResourceExistsError, + ClientAuthenticationError, + DecodeError) + +from .parser import _to_utc_datetime +from .models import StorageErrorCode, UserDelegationKey, get_enum_value + + +if TYPE_CHECKING: + from datetime import datetime + from azure.core.exceptions import AzureError + + +_LOGGER = logging.getLogger(__name__) + + +class PartialBatchErrorException(HttpResponseError): + """There is a partial failure in batch operations. + + :param str message: The message of the exception. + :param response: Server response to be deserialized. + :param list parts: A list of the parts in multipart response. 
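+
+    Illustrative handling sketch (the batch call shown is hypothetical)::
+
+        try:
+            client.delete_files(*file_names)
+        except PartialBatchErrorException as error:
+            for part in error.parts:
+                print(part.status_code)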
+    """
+
+    def __init__(self, message, response, parts):
+        self.parts = parts
+        super(PartialBatchErrorException, self).__init__(message=message, response=response)
+
+
+def parse_length_from_content_range(content_range):
+    '''
+    Parses the blob length from the content range header: bytes 1-3/65537
+    '''
+    if content_range is None:
+        return None
+
+    # First, split on space and take the second half: '1-3/65537'
+    # Next, split on slash and take the second half: '65537'
+    # Finally, convert to an int: 65537
+    return int(content_range.split(' ', 1)[1].split('/', 1)[1])
+
+
+def normalize_headers(headers):
+    normalized = {}
+    for key, value in headers.items():
+        if key.startswith('x-ms-'):
+            key = key[5:]
+        normalized[key.lower().replace('-', '_')] = get_enum_value(value)
+    return normalized
+
+
+def deserialize_metadata(response, obj, headers):  # pylint: disable=unused-argument
+    raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")}
+    return {k[10:]: v for k, v in raw_metadata.items()}
+
+
+def return_response_headers(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return normalize_headers(response_headers)
+
+
+def return_headers_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return normalize_headers(response_headers), deserialized
+
+
+def return_context_and_deserialized(response, deserialized, response_headers):  # pylint: disable=unused-argument
+    return response.http_response.location_mode, deserialized
+
+
+def process_storage_error(storage_error):  # pylint:disable=too-many-statements
+    raise_error = HttpResponseError
+    serialized = False
+    if not storage_error.response:
+        raise storage_error
+    # If it is one of these errors, it has already been serialized by the generated layer.
+    if isinstance(storage_error, (PartialBatchErrorException,
+                                  ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)):
+        serialized = True
+    error_code = storage_error.response.headers.get('x-ms-error-code')
+    error_message = storage_error.message
+    additional_data = {}
+    error_dict = {}
+    try:
+        error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response)
+        # If it is an XML response
+        if isinstance(error_body, Element):
+            error_dict = {
+                child.tag.lower(): child.text
+                for child in error_body
+            }
+        # If it is a JSON response
+        elif isinstance(error_body, dict):
+            error_dict = error_body.get('error', {})
+        elif not error_code:
+            _LOGGER.warning(
+                'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.',
+                type(error_body))
+            error_dict = {'message': str(error_body)}
+
+        # If we extracted from a JSON or XML response
+        if error_dict:
+            error_code = error_dict.get('code')
+            error_message = error_dict.get('message')
+            additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}}
+    except DecodeError:
+        pass
+
+    try:
+        # This check would be unnecessary if we have already serialized the error
+        if error_code and not serialized:
+            error_code = StorageErrorCode(error_code)
+            if error_code in [StorageErrorCode.condition_not_met,
+                              StorageErrorCode.blob_overwritten]:
+                raise_error = ResourceModifiedError
+            if error_code in [StorageErrorCode.invalid_authentication_info,
+                              StorageErrorCode.authentication_failed]:
+                raise_error = ClientAuthenticationError
+            if error_code in [StorageErrorCode.resource_not_found,
+                              StorageErrorCode.cannot_verify_copy_source,
+                              StorageErrorCode.blob_not_found,
+                              StorageErrorCode.queue_not_found,
+                              StorageErrorCode.container_not_found,
+                              StorageErrorCode.parent_not_found,
+                              StorageErrorCode.share_not_found]:
+                raise_error = ResourceNotFoundError
+            if error_code in [StorageErrorCode.account_already_exists,
+                              StorageErrorCode.account_being_created,
+                              StorageErrorCode.resource_already_exists,
+                              StorageErrorCode.resource_type_mismatch,
+                              StorageErrorCode.blob_already_exists,
+                              StorageErrorCode.queue_already_exists,
+                              StorageErrorCode.container_already_exists,
+                              StorageErrorCode.container_being_deleted,
+                              StorageErrorCode.queue_being_deleted,
+                              StorageErrorCode.share_already_exists,
+                              StorageErrorCode.share_being_deleted]:
+                raise_error = ResourceExistsError
+    except ValueError:
+        # Got an unknown error code
+        pass
+
+    # Error message should include all the error properties
+    try:
+        error_message += "\nErrorCode:{}".format(error_code.value)
+    except AttributeError:
+        error_message += "\nErrorCode:{}".format(error_code)
+    for name, info in additional_data.items():
+        error_message += "\n{}:{}".format(name, info)
+
+    # No need to create an instance if it has already been serialized by the generated layer
+    if serialized:
+        storage_error.message = error_message
+        error = storage_error
+    else:
+        error = raise_error(message=error_message, response=storage_error.response)
+    # Ensure these properties are stored in the error instance as well (not just the error message)
+    error.error_code = error_code
+    error.additional_info = additional_data
+    # error.args is what's surfaced on the traceback - show error message in all cases
+    error.args = (error.message,)
+    try:
+        # `from None` prevents us from double printing the exception (suppresses generated layer error context)
+        exec("raise error from None")  # pylint: disable=exec-used # nosec
+    except SyntaxError:
+        raise error
+
+
+def
parse_to_internal_user_delegation_key(service_user_delegation_key): + internal_user_delegation_key = UserDelegationKey() + internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid + internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid + internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) + internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) + internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service + internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version + internal_user_delegation_key.value = service_user_delegation_key.value + return internal_user_delegation_key diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/shared_access_signature.py new file mode 100644 index 0000000..07aad5f --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/shared_access_signature.py @@ -0,0 +1,220 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import date + +from .parser import _str, _to_utc_datetime +from .constants import X_MS_VERSION +from . import sign_string, url_quote + + +class QueryStringConstants(object): + SIGNED_SIGNATURE = 'sig' + SIGNED_PERMISSION = 'sp' + SIGNED_START = 'st' + SIGNED_EXPIRY = 'se' + SIGNED_RESOURCE = 'sr' + SIGNED_IDENTIFIER = 'si' + SIGNED_IP = 'sip' + SIGNED_PROTOCOL = 'spr' + SIGNED_VERSION = 'sv' + SIGNED_CACHE_CONTROL = 'rscc' + SIGNED_CONTENT_DISPOSITION = 'rscd' + SIGNED_CONTENT_ENCODING = 'rsce' + SIGNED_CONTENT_LANGUAGE = 'rscl' + SIGNED_CONTENT_TYPE = 'rsct' + START_PK = 'spk' + START_RK = 'srk' + END_PK = 'epk' + END_RK = 'erk' + SIGNED_RESOURCE_TYPES = 'srt' + SIGNED_SERVICES = 'ss' + SIGNED_OID = 'skoid' + SIGNED_TID = 'sktid' + SIGNED_KEY_START = 'skt' + SIGNED_KEY_EXPIRY = 'ske' + SIGNED_KEY_SERVICE = 'sks' + SIGNED_KEY_VERSION = 'skv' + + # for ADLS + SIGNED_AUTHORIZED_OID = 'saoid' + SIGNED_UNAUTHORIZED_OID = 'suoid' + SIGNED_CORRELATION_ID = 'scid' + SIGNED_DIRECTORY_DEPTH = 'sdd' + + @staticmethod + def to_list(): + return [ + QueryStringConstants.SIGNED_SIGNATURE, + QueryStringConstants.SIGNED_PERMISSION, + QueryStringConstants.SIGNED_START, + QueryStringConstants.SIGNED_EXPIRY, + QueryStringConstants.SIGNED_RESOURCE, + QueryStringConstants.SIGNED_IDENTIFIER, + QueryStringConstants.SIGNED_IP, + QueryStringConstants.SIGNED_PROTOCOL, + QueryStringConstants.SIGNED_VERSION, + QueryStringConstants.SIGNED_CACHE_CONTROL, + QueryStringConstants.SIGNED_CONTENT_DISPOSITION, + QueryStringConstants.SIGNED_CONTENT_ENCODING, + QueryStringConstants.SIGNED_CONTENT_LANGUAGE, + QueryStringConstants.SIGNED_CONTENT_TYPE, + QueryStringConstants.START_PK, + QueryStringConstants.START_RK, + QueryStringConstants.END_PK, + QueryStringConstants.END_RK, + QueryStringConstants.SIGNED_RESOURCE_TYPES, + QueryStringConstants.SIGNED_SERVICES, + QueryStringConstants.SIGNED_OID, + QueryStringConstants.SIGNED_TID, + QueryStringConstants.SIGNED_KEY_START, + QueryStringConstants.SIGNED_KEY_EXPIRY, + QueryStringConstants.SIGNED_KEY_SERVICE, + 
QueryStringConstants.SIGNED_KEY_VERSION,
+            # for ADLS
+            QueryStringConstants.SIGNED_AUTHORIZED_OID,
+            QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+            QueryStringConstants.SIGNED_CORRELATION_ID,
+            QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(self, services, resource_types, permission, expiry, start=None,
+                         ip=None, protocol=None):
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature with the sas_token parameter of the service
+        or to create a new account object.
+
+        :param Services services:
+            Specifies the services accessible with the account SAS.
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy. You can combine
+            values to provide more than one permission.
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. Azure will always convert values
+            to UTC. If a date is passed in without timezone info, it is assumed to
+            be UTC.
+        :type start: datetime or str
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
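+
+        Illustrative example (account name and key are placeholders)::
+
+            from datetime import datetime, timedelta
+
+            sas = SharedAccessSignature('myaccount', '<account-key>')
+            token = sas.generate_account(
+                services='f', resource_types='sco', permission='r',
+                expiry=datetime.utcnow() + timedelta(hours=1))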
+ ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _str(val) if val is not None else None + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(QueryStringConstants.SIGNED_START, start) + self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(QueryStringConstants.SIGNED_IP, ip) + self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) + + def add_id(self, policy_id): + self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) + + def add_account(self, services, resource_types): + self._add_query(QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) + self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION)) + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + def get_token(self): + return '&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads.py new file mode 100644 index 0000000..1b619df --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads.py @@ -0,0 +1,602 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+from concurrent import futures
+from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
+from threading import Lock
+from itertools import islice
+from math import ceil
+
+import six
+
+from azure.core.tracing.common import with_current_context
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .encryption import get_blob_encryptor_and_padder
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
+
+
+def _parallel_uploads(executor, uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(executor.submit(with_current_context(uploader), next_chunk))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    done, _running = futures.wait(running)
+    range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        validate_content=None,
+        encryption_options=None,
+        **kwargs):
+
+    if encryption_options:
+        encryptor, padder = get_blob_encryptor_and_padder(
+            encryption_options.get('cek'),
+            encryption_options.get('vector'),
+            uploader_class is not PageBlobChunkUploader)
+        kwargs['encryptor'] = encryptor
+        kwargs['padder'] = padder
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        validate_content=validate_content,
+        **kwargs)
+    if parallel:
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_chunk_streams()
+            running_futures = [
+                executor.submit(with_current_context(uploader.process_chunk), u)
+                for u in islice(upload_tasks, 0, max_concurrency)
+            ]
+            range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        **kwargs)
+
+    if parallel:
+        with futures.ThreadPoolExecutor(max_concurrency) as executor:
+            upload_tasks = uploader.get_substream_blocks()
+            running_futures = [
+                executor.submit(with_current_context(uploader.process_substream_block), u)
+                for u in
islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] + if any(range_ids): + return sorted(range_ids) + return [] + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b"" + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError("Blob data should be of type bytes.") + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b"" or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + def _update_progress(self, length): + if self.progress_lock is not None: + with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = self._upload_chunk(chunk_offset, chunk_data) + self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + def process_substream_block(self, block_data): + return self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + def 
_upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_substream_block_with_progress(self, index, block_stream): + range_id = self._upload_substream_block(index, block_stream) + self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop("modified_access_conditions", None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return index, block_id + + def _upload_substream_block(self, index, block_stream): + try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) + self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + return not any(bytearray(chunk_data)) + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + self.current_length = int(self.response_headers["blob_append_offset"]) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + 
**self.request_options + ) + + def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + self.response_headers = self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): + try: + self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response + + # TODO: Implement this method. + def _upload_substream_block(self, index, block_stream): + pass + + +class SubStream(IOBase): + + def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): + # Python 2.7: file-like objects created with open() typically support seek(), but are not + # derivations of io.IOBase and thus do not implement seekable(). + # Python > 3.0: file-like objects created with open() are derived from io.IOBase. 
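+        # A SubStream exposes a fixed-length window over the wrapped stream so each
+        # uploader worker can read its own block; when a lock is supplied (parallel
+        # uploads), seek+read pairs on the underlying stream are serialized with it.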
+        try:
+            # only the main thread runs this, so there's no need to grab the lock
+            wrapped_stream.seek(0, SEEK_CUR)
+        except:
+            raise ValueError("Wrapped stream must support seek().")
+
+        self._lock = lockObj
+        self._wrapped_stream = wrapped_stream
+        self._position = 0
+        self._stream_begin_index = stream_begin_index
+        self._length = length
+        self._buffer = BytesIO()
+
+        # we must avoid buffering more than necessary, and also not use up too much memory
+        # so the max buffer size is capped at 4MB
+        self._max_buffer_size = (
+            length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
+        )
+        self._current_buffer_start = 0
+        self._current_buffer_size = 0
+        super(SubStream, self).__init__()
+
+    def __len__(self):
+        return self._length
+
+    def close(self):
+        if self._buffer:
+            self._buffer.close()
+        self._wrapped_stream = None
+        IOBase.close(self)
+
+    def fileno(self):
+        return self._wrapped_stream.fileno()
+
+    def flush(self):
+        pass
+
+    def read(self, size=None):
+        if self.closed:  # pylint: disable=using-constant-test
+            raise ValueError("Stream is closed.")
+
+        if size is None:
+            size = self._length - self._position
+
+        # adjust if out of bounds
+        if size + self._position >= self._length:
+            size = self._length - self._position
+
+        # return fast
+        if size == 0 or self._buffer.closed:
+            return b""
+
+        # attempt first read from the read buffer and update position
+        read_buffer = self._buffer.read(size)
+        bytes_read = len(read_buffer)
+        bytes_remaining = size - bytes_read
+        self._position += bytes_read
+
+        # repopulate the read buffer from the underlying stream to fulfill the request
+        # ensure the seek and read operations are done atomically (only if a lock is provided)
+        if bytes_remaining > 0:
+            with self._buffer:
+                # either read in the max buffer size specified on the class
+                # or read in just enough data for the current block/sub stream
+                current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
+
+                # lock is only defined if max_concurrency > 1 (parallel uploads)
+                if self._lock:
+                    with self._lock:
+                        # reposition the underlying stream to match the start of the data to read
+                        absolute_position = self._stream_begin_index + self._position
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+                        # If we can't seek to the right location, our read will be corrupted so fail fast.
+                        if self._wrapped_stream.tell() != absolute_position:
+                            raise IOError("Stream failed to seek to the desired location.")
+                        buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+                else:
+                    absolute_position = self._stream_begin_index + self._position
+                    # It's possible that there's a connection problem during data transfer,
+                    # so when we retry we don't want to read from the current position of the wrapped stream,
+                    # instead we should seek to where we want to read from.
+                    if self._wrapped_stream.tell() != absolute_position:
+                        self._wrapped_stream.seek(absolute_position, SEEK_SET)
+
+                    buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
+
+                if buffer_from_stream:
+                    # update the buffer with new data from the wrapped stream
+                    # we need to note down the start position and size of the buffer, in case seek is performed later
+                    self._buffer = BytesIO(buffer_from_stream)
+                    self._current_buffer_start = self._position
+                    self._current_buffer_size = len(buffer_from_stream)
+
+                    # read the remaining bytes from the new buffer and update position
+                    second_read_buffer = self._buffer.read(bytes_remaining)
+                    read_buffer += second_read_buffer
+                    self._position += len(second_read_buffer)
+
+        return read_buffer
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        raise UnsupportedOperation
+
+    def seek(self, offset, whence=0):
+        if whence is SEEK_SET:
+            start_index = 0
+        elif whence is SEEK_CUR:
+            start_index = self._position
+        elif whence is SEEK_END:
+            start_index = self._length
+            offset = -offset
+        else:
+            raise ValueError("Invalid argument for the 'whence' parameter.")
+
+        pos = start_index + offset
+
+        if pos > self._length:
+            pos = self._length
+        elif pos < 0:
+            pos = 0
+
+        # check if buffer is still valid
+        # if not, drop buffer
+        if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
+            self._buffer.close()
+            self._buffer = BytesIO()
+        else:  # if yes seek to correct position
+            delta = pos - self._current_buffer_start
+            self._buffer.seek(delta, SEEK_SET)
+
+        self._position = pos
+        return pos
+
+    def seekable(self):
+        return True
+
+    def tell(self):
+        return self._position
+
+    def write(self):
+        raise UnsupportedOperation
+
+    def writelines(self):
+        raise UnsupportedOperation
+
+    def writable(self):
+        return False
+
+
+class IterStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator, encoding="UTF-8"):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self.leftover = b""
+        self.encoding = encoding
+
+    def __len__(self):
+        return self.generator.__len__()
+
+    def __iter__(self):
+        return self.iterator
+
+    def seekable(self):
+        return False
+
+    def __next__(self):
+        return next(self.iterator)
+
+    next = __next__  # Python 2 compatibility.
+
+    def tell(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator does not support tell.")
+
+    def seek(self, *args, **kwargs):
+        raise UnsupportedOperation("Data generator is unseekable.")
+
+    def read(self, size):
+        data = self.leftover
+        count = len(self.leftover)
+        try:
+            while count < size:
+                chunk = self.__next__()
+                if isinstance(chunk, six.text_type):
+                    chunk = chunk.encode(self.encoding)
+                data += chunk
+                count += len(chunk)
+        except StopIteration:
+            pass
+
+        if count > size:
+            self.leftover = data[size:]
+        else:
+            # everything buffered has been consumed; reset the leftover so the
+            # same bytes are not returned again on a subsequent read
+            self.leftover = b""
+
+        return data[:size]
diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads_async.py
new file mode 100644
index 0000000..5ed192b
--- /dev/null
+++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared/uploads_async.py
@@ -0,0 +1,395 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+import asyncio
+from asyncio import Lock
+from itertools import islice
+import threading
+
+from math import ceil
+
+import six
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .encryption import get_blob_encryptor_and_padder
+from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
+
+
+_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
+_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.'
+
+
+async def _parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        encryption_options=None,
+        **kwargs):
+
+    if encryption_options:
+        encryptor, padder = get_blob_encryptor_and_padder(
+            encryption_options.get('cek'),
+            encryption_options.get('vector'),
+            uploader_class is not PageBlobChunkUploader)
+        kwargs['encryptor'] = encryptor
+        kwargs['padder'] = padder
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_chunk_streams()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_chunk(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for chunk in uploader.get_chunk_streams():
+            range_ids.append(await uploader.process_chunk(chunk))
+
+    if any(range_ids):
+        return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
+    return uploader.response_headers
+
+
+async def upload_substream_blocks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        **kwargs):
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_substream_blocks()
+        running_futures = [
+            asyncio.ensure_future(uploader.process_substream_block(u))
+            for u in islice(upload_tasks, 0, max_concurrency)
+        ]
+        range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        for block in uploader.get_substream_blocks():
+            range_ids.append(await
uploader.process_substream_block(block)) + if any(range_ids): + return sorted(range_ids) + return + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__(self, service, total_size, chunk_size, stream, parallel, encryptor=None, padder=None, **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_start = stream.tell() if parallel else None + self.stream_lock = threading.Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b'' + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, six.binary_type): + raise TypeError('Blob data should be of type bytes.') + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b'' or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + async def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + async def _update_progress(self, length): + if self.progress_lock is not None: + async with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + async def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = await self._upload_chunk(chunk_offset, chunk_data) + await self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + async def process_substream_block(self, block_data): + return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + async def _upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + async def 
_upload_substream_block_with_progress(self, index, block_stream): + range_id = await self._upload_substream_block(index, block_stream) + await self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop('modified_access_conditions', None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. + index = '{0:032d}'.format(chunk_offset) + block_id = encode_base64(url_quote(encode_base64(index))) + await self.service.stage_block( + block_id, + len(chunk_data), + body=chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + return index, block_id + + async def _upload_substream_block(self, index, block_stream): + try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) + await self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + for each_byte in chunk_data: + if each_byte not in [0, b'\x00']: + return False + return True + + async def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + computed_md5 = None + self.response_headers = await self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + async def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + self.current_length = int(self.response_headers['blob_append_offset']) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + 
**self.request_options) + + async def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + self.response_headers = await self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + async def _upload_substream_block(self, index, block_stream): + try: + await self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = await self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) + return range_id, response + + # TODO: Implement this method. + async def _upload_substream_block(self, index, block_stream): + pass diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared_access_signature.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared_access_signature.py new file mode 100644 index 0000000..20dad95 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_shared_access_signature.py @@ -0,0 +1,491 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, List, TYPE_CHECKING +) + +from ._shared import sign_string +from ._shared.constants import X_MS_VERSION +from ._shared.models import Services +from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants +from ._shared.parser import _str + +if TYPE_CHECKING: + from datetime import datetime + from . import ( + ResourceTypes, + AccountSasPermissions, + ShareSasPermissions, + FileSasPermissions + ) + +class FileSharedAccessSignature(SharedAccessSignature): + ''' + Provides a factory for creating file and share access + signature tokens with a common account name and account key. Users can either + use the factory or can construct the appropriate service and use the + generate_*_shared_access_signature method directly. + ''' + + def __init__(self, account_name, account_key): + ''' + :param str account_name: + The storage account name used to generate the shared access signatures. + :param str account_key: + The access key to generate the shared access signatures. 
+ ''' + super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) + + def generate_file(self, share_name, directory_name=None, file_name=None, + permission=None, expiry=None, start=None, policy_id=None, + ip=None, protocol=None, cache_control=None, + content_disposition=None, content_encoding=None, + content_language=None, content_type=None): + ''' + Generates a shared access signature for the file. + Use the returned signature with the sas_token parameter of FileService. + + :param str share_name: + Name of share. + :param str directory_name: + Name of directory. SAS tokens cannot be created for directories, so + this parameter should only be present if file_name is provided. + :param str file_name: + Name of file. + :param ~azure.storage.fileshare.FileSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, create, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_file_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. 
+ ''' + resource_path = share_name + if directory_name is not None: + resource_path += '/' + _str(directory_name) + if file_name is not None: + resource_path += '/' + _str(file_name) + + sas = _FileSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + sas.add_resource('f') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_resource_signature(self.account_name, self.account_key, resource_path) + + return sas.get_token() + + def generate_share(self, share_name, permission=None, expiry=None, + start=None, policy_id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None): + ''' + Generates a shared access signature for the share. + Use the returned signature with the sas_token parameter of FileService. + + :param str share_name: + Name of share. + :param ShareSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, create, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_file_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. 
+ :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + sas = _FileSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + sas.add_resource('s') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_resource_signature(self.account_name, self.account_key, share_name) + + return sas.get_token() + + +class _FileSharedAccessHelper(_SharedAccessHelper): + + def add_resource_signature(self, account_name, account_key, path): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + if path[0] != '/': + path = '/' + path + + canonicalized_resource = '/file/' + account_name + path + '\n' + + # Form the string to sign from shared_access_policy and canonicalized + # resource. The order of values is important. + string_to_sign = \ + (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + canonicalized_resource + + get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION) + + get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) + + # remove the trailing newline + if string_to_sign[-1] == '\n': + string_to_sign = string_to_sign[:-1] + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + +def generate_account_sas( + account_name, # type: str + account_key, # type: str + resource_types, # type: Union[ResourceTypes, str] + permission, # type: Union[AccountSasPermissions, str] + expiry, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for the file service. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param ~azure.storage.fileshare.ResourceTypes resource_types: + Specifies the resource types that are accessible with the account SAS. + :param ~azure.storage.fileshare.AccountSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. 
This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START generate_sas_token] + :end-before: [END generate_sas_token] + :language: python + :dedent: 8 + :caption: Generate a sas token. + """ + sas = SharedAccessSignature(account_name, account_key) + return sas.generate_account( + services=Services(fileshare=True), + resource_types=resource_types, + permission=permission, + expiry=expiry, + start=start, + ip=ip, + **kwargs + ) # type: ignore + + +def generate_share_sas( + account_name, # type: str + share_name, # type: str + account_key, # type: str + permission=None, # type: Optional[Union[ShareSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): # type: (...) -> str + """Generates a shared access signature for a share. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str share_name: + The name of the share. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param ~azure.storage.fileshare.ShareSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, create, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. 
If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + sas = FileSharedAccessSignature(account_name, account_key) + return sas.generate_share( + share_name=share_name, + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) + + +def generate_file_sas( + account_name, # type: str + share_name, # type: str + file_path, # type: List[str] + account_key, # type: str + permission=None, # type: Optional[Union[FileSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for a file. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str share_name: + The name of the share. + :param file_path: + The file path represented as a list of path segments, including the file name. + :type file_path: List[str] + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param ~azure.storage.fileshare.FileSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. 
+ :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + sas = FileSharedAccessSignature(account_name, account_key) + if len(file_path) > 1: + dir_path = '/'.join(file_path[:-1]) + else: + dir_path = None # type: ignore + return sas.generate_file( # type: ignore + share_name=share_name, + directory_name=dir_path, + file_name=file_path[-1], + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/_version.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/_version.py new file mode 100644 index 0000000..c9d0e60 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/_version.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
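For orientation before the next vendored file, here is a minimal usage sketch of the SAS helpers defined above. It is a hedged example rather than part of the vendored sources: the import path assumes the v2020_10_02 package re-exports these names the same way azure.storage.fileshare does, and the account, share, and file names are placeholders.

from datetime import datetime, timedelta, timezone

from azure.multiapi.storagev2.fileshare.v2020_10_02 import (  # assumed re-export path
    FileSasPermissions,
    generate_file_sas,
)

# Placeholder account values; substitute real ones.
sas_token = generate_file_sas(
    account_name="myaccount",
    share_name="myshare",
    file_path=["mydir", "myfile.txt"],  # path segments, file name last
    account_key="<account-key>",
    permission=FileSasPermissions(read=True),
    expiry=datetime.now(timezone.utc) + timedelta(hours=1),
)

# The returned token is appended to the resource URL as a query string.
file_url = "https://myaccount.file.core.windows.net/myshare/mydir/myfile.txt?" + sas_token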
+# -------------------------------------------------------------------------- + +VERSION = "12.6.0" diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/__init__.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/__init__.py new file mode 100644 index 0000000..73393b8 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/__init__.py @@ -0,0 +1,20 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from ._file_client_async import ShareFileClient +from ._directory_client_async import ShareDirectoryClient +from ._share_client_async import ShareClient +from ._share_service_client_async import ShareServiceClient +from ._lease_async import ShareLeaseClient + + +__all__ = [ + 'ShareFileClient', + 'ShareDirectoryClient', + 'ShareClient', + 'ShareServiceClient', + 'ShareLeaseClient', +] diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_directory_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_directory_client_async.py new file mode 100644 index 0000000..ae7767d --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_directory_client_async.py @@ -0,0 +1,606 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +import time +from typing import ( # pylint: disable=unused-import + Optional, Union, Any, Dict, TYPE_CHECKING +) + +from azure.core.async_paging import AsyncItemPaged +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import AsyncPipeline +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from .._parser import _get_file_permission, _datetime_to_str +from .._shared.parser import _str + +from .._generated.aio import AzureFileStorage +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.policies_async import ExponentialRetry +from .._shared.request_handlers import add_metadata_headers +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._deserialize import deserialize_directory_properties +from .._serialize import get_api_version +from .._directory_client import ShareDirectoryClient as ShareDirectoryClientBase +from ._file_client_async import ShareFileClient +from ._models import DirectoryPropertiesPaged, HandlesPaged + +if TYPE_CHECKING: + from datetime import datetime + from .._models import ShareProperties, DirectoryProperties, ContentSettings, NTFSAttributes + from .._generated.models import HandleItem + + +class ShareDirectoryClient(AsyncStorageAccountHostsMixin, ShareDirectoryClientBase): + """A client to interact with a specific directory, although it may not yet exist. + + For operations relating to a specific subdirectory or file in this share, the clients for those + entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. 
+ + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the directory, + use the :func:`from_directory_url` classmethod. + :param share_name: + The name of the share for the directory. + :type share_name: str + :param str directory_path: + The directory path for the directory with which to interact. + If specified, this value will override a directory value specified in the directory URL. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential from azure.core.credentials or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is '2019-07-07'. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword loop: + The event loop to run the asynchronous tasks. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + directory_path, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Optional[Any] + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + loop = kwargs.pop('loop', None) + super(ShareDirectoryClient, self).__init__( + account_url, + share_name=share_name, + directory_path=directory_path, + snapshot=snapshot, + credential=credential, + loop=loop, + **kwargs) + self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + self._loop = loop + + def get_file_client(self, file_name, **kwargs): + # type: (str, Any) -> ShareFileClient + """Get a client to interact with a specific file. + + The file need not already exist. + + :param str file_name: + The name of the file. + :returns: A File Client. + :rtype: ~azure.storage.fileshare.ShareFileClient + """ + if self.directory_path: + file_name = self.directory_path.rstrip('/') + "/" + file_name + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareFileClient( + self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) + + def get_subdirectory_client(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Get a client to interact with a specific subdirectory. + + The subdirectory need not already exist. + + :param str directory_name: + The name of the subdirectory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START get_subdirectory_client] + :end-before: [END get_subdirectory_client] + :language: python + :dedent: 16 + :caption: Gets the subdirectory client. + """ + directory_path = self.directory_path.rstrip('/') + "/" + directory_name + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop, **kwargs) + + @distributed_trace_async + async def create_directory(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new directory under the directory referenced by the client. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the directory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START create_directory] + :end-before: [END create_directory] + :language: python + :dedent: 16 + :caption: Creates a directory. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return await self._client.directory.create( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def delete_directory(self, **kwargs): + # type: (**Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_directory] + :end-before: [END delete_directory] + :language: python + :dedent: 16 + :caption: Deletes a directory. + """ + timeout = kwargs.pop('timeout', None) + try: + await self._client.directory.delete(timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files(self, name_starts_with=None, **kwargs): + # type: (Optional[str], Any) -> AsyncItemPaged + """Lists all the directories and files under the directory. + + :param str name_starts_with: + Filters the results to return only entities whose names + begin with the specified prefix. + :keyword list[str] include: + Include this parameter to specify one or more datasets to include in the response. + Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword bool include_extended_info: + If this is set to true, file id will be returned in listed results. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. 
+ + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + :rtype: ~azure.core.async_paging.AsyncItemPaged[DirectoryProperties and FileProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START lists_directory] + :end-before: [END lists_directory] + :language: python + :dedent: 16 + :caption: List directories and files. + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_files_and_directories_segment, + sharesnapshot=self.snapshot, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=DirectoryPropertiesPaged) + + @distributed_trace + def list_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> AsyncItemPaged + """Lists opened handles on a directory or a file under the directory. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of HandleItem + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem] + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.directory.list_handles, + sharesnapshot=self.snapshot, + timeout=timeout, + recursive=recursive, + **kwargs) + return AsyncItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=HandlesPaged) + + @distributed_trace_async + async def close_handle(self, handle, **kwargs): + # type: (Union[str, HandleItem], Any) -> Dict[str, int] + """Close an open file handle. + + :param handle: + A specific handle to close. + :type handle: str or ~azure.storage.fileshare.Handle + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + try: + handle_id = handle.id # type: ignore + except AttributeError: + handle_id = handle + if handle_id == '*': + raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") + try: + response = await self._client.directory.force_close_handles( + handle_id, + marker=None, + recursive=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def close_all_handles(self, recursive=False, **kwargs): + # type: (bool, Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :param bool recursive: + Boolean that specifies if operation should apply to the directory specified by the client, + its files, its subdirectories and their files. Default value is False. + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :returns: The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = await self._client.directory.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + recursive=recursive, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } + + @distributed_trace_async + async def get_directory_properties(self, **kwargs): + # type: (Any) -> DirectoryProperties + """Returns all user-defined metadata and system properties for the + specified directory. The data returned does not include the directory's + list of files. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: DirectoryProperties + :rtype: ~azure.storage.fileshare.DirectoryProperties + """ + timeout = kwargs.pop('timeout', None) + try: + response = await self._client.directory.get_properties( + timeout=timeout, + cls=deserialize_directory_properties, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return response # type: ignore + + @distributed_trace_async + async def set_directory_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the directory. + + Each call to this operation replaces all existing metadata + attached to the directory. To remove all metadata from the directory, + call this operation with an empty metadata dict. + + :param metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return await self._client.directory.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Union[str, datetime] + file_last_write_time="preserve", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the directory. + + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. 
+ Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. + :type file_creation_time: str or datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. + :type file_last_write_time: str or datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + file_permission = _get_file_permission(file_permission, permission_key, 'preserve') + try: + return await self._client.directory.set_properties( # type: ignore + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def create_subdirectory( + self, directory_name, # type: str + **kwargs + ): + # type: (...) -> ShareDirectoryClient + """Creates a new subdirectory and returns a client to interact + with the subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword dict(str,str) metadata: + Name-value pairs associated with the subdirectory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START create_subdirectory] + :end-before: [END create_subdirectory] + :language: python + :dedent: 16 + :caption: Create a subdirectory. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) + return subdir # type: ignore + + @distributed_trace_async + async def delete_subdirectory( + self, directory_name, # type: str + **kwargs + ): + # type: (...) -> None + """Deletes a subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_subdirectory] + :end-before: [END delete_subdirectory] + :language: python + :dedent: 16 + :caption: Delete a subdirectory. 
+ """ + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + await subdir.delete_directory(timeout=timeout, **kwargs) + + @distributed_trace_async + async def upload_file( + self, file_name, # type: str + data, # type: Any + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> ShareFileClient + """Creates a new file in the directory and returns a ShareFileClient + to interact with the file. + + :param str file_name: + The name of the file. + :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword str encoding: + Defaults to UTF-8. + :returns: ShareFileClient + :rtype: ~azure.storage.fileshare.aio.ShareFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START upload_file_to_directory] + :end-before: [END upload_file_to_directory] + :language: python + :dedent: 16 + :caption: Upload a file to a directory. + """ + file_client = self.get_file_client(file_name) + await file_client.upload_file( + data, + length=length, + **kwargs) + return file_client # type: ignore + + @distributed_trace_async + async def delete_file( + self, file_name, # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> None + """Marks the specified file for deletion. The file is later + deleted during garbage collection. + + :param str file_name: + The name of the file to delete. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_file_in_directory] + :end-before: [END delete_file_in_directory] + :language: python + :dedent: 16 + :caption: Delete a file in a directory. + """ + file_client = self.get_file_client(file_name) + await file_client.delete_file(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_download_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_download_async.py new file mode 100644 index 0000000..971f12e --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_download_async.py @@ -0,0 +1,492 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import asyncio +import sys +from io import BytesIO +from itertools import islice +import warnings + +from typing import AsyncIterator +from azure.core.exceptions import HttpResponseError, ResourceModifiedError +from .._shared.encryption import decrypt_blob +from .._shared.request_handlers import validate_and_format_range_headers +from .._shared.response_handlers import process_storage_error, parse_length_from_content_range +from .._download import process_range_and_offset, _ChunkDownloader + + +async def process_content(data, start_offset, end_offset, encryption): + if data is None: + raise ValueError("Response cannot be None.") + try: + content = data.response.body() + except Exception as error: + raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) + if encryption.get('key') is not None or encryption.get('resolver') is not None: + try: + return decrypt_blob( + encryption.get('required'), + encryption.get('key'), + encryption.get('resolver'), + content, + start_offset, + end_offset, + data.response.headers) + except Exception as error: + raise HttpResponseError( + message="Decryption failed.", + response=data.response, + error=error) + return content + + +class _AsyncChunkDownloader(_ChunkDownloader): + def __init__(self, **kwargs): + super(_AsyncChunkDownloader, self).__init__(**kwargs) + self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None + self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None + + async def process_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) + length = chunk_end - chunk_start + if length > 0: + await self._write_to_stream(chunk_data, chunk_start) + await self._update_progress(length) + + async def yield_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + return await self._download_chunk(chunk_start, chunk_end - 1) + + async def _update_progress(self, length): + if self.progress_lock: + async with self.progress_lock: # pylint: disable=not-async-context-manager + self.progress_total += length + else: + self.progress_total += length + + async def _write_to_stream(self, chunk_data, chunk_start): + if self.stream_lock: + async with self.stream_lock: # pylint: disable=not-async-context-manager + self.stream.seek(self.stream_start + (chunk_start - self.start_index)) + self.stream.write(chunk_data) + else: + self.stream.write(chunk_data) + + async def _download_chunk(self, chunk_start, chunk_end): + download_range, offset = process_range_and_offset( + chunk_start, chunk_end, chunk_end, self.encryption_options + ) + range_header, range_validation = validate_and_format_range_headers( + download_range[0], + download_range[1], + check_content_md5=self.validate_content + ) + try: + _, response = await self.client.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self.validate_content, + data_stream_total=self.total_size, + download_stream_current=self.progress_total, + **self.request_options + ) + if response.properties.etag != self.etag: + raise ResourceModifiedError(message="The file has been modified while downloading.") + except HttpResponseError as error: + process_storage_error(error) + + chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) + 
return chunk_data + + +class _AsyncChunkIterator(object): + """Async iterator for chunks in the file download stream.""" + + def __init__(self, size, content, downloader, chunk_size): + self.size = size + self._chunk_size = chunk_size + self._current_content = content + self._iter_downloader = downloader + self._iter_chunks = None + self._complete = (size == 0) + + def __len__(self): + return self.size + + def __iter__(self): + raise TypeError("Async stream must be iterated asynchronously.") + + def __aiter__(self): + return self + + async def __anext__(self): + """Iterate through responses.""" + if self._complete: + raise StopAsyncIteration("Download complete") + if not self._iter_downloader: + # cut the data obtained from initial GET into chunks + if len(self._current_content) > self._chunk_size: + return self._get_chunk_data() + self._complete = True + return self._current_content + + if not self._iter_chunks: + self._iter_chunks = self._iter_downloader.get_chunk_offsets() + + # initial GET result still has more than _chunk_size bytes of data + if len(self._current_content) >= self._chunk_size: + return self._get_chunk_data() + + try: + chunk = next(self._iter_chunks) + self._current_content += await self._iter_downloader.yield_chunk(chunk) + except StopIteration: + self._complete = True + # it's likely that there is some data left in self._current_content + if self._current_content: + return self._current_content + raise StopAsyncIteration("Download complete") + + return self._get_chunk_data() + + def _get_chunk_data(self): + chunk_data = self._current_content[: self._chunk_size] + self._current_content = self._current_content[self._chunk_size:] + return chunk_data + + +class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes + """A streaming object to download from Azure Storage. + + :ivar str name: + The name of the file being downloaded. + :ivar str path: + The full path of the file. + :ivar str share: + The name of the share where the file is. + :ivar ~azure.storage.fileshare.FileProperties properties: + The properties of the file being downloaded. If only a range of the data is being + downloaded, this will be reflected in the properties. + :ivar int size: + The size of the total data in the stream. This will be the byte range if specified, + otherwise the total size of the file. + """ + + def __init__( + self, + client=None, + config=None, + start_range=None, + end_range=None, + validate_content=None, + encryption_options=None, + max_concurrency=1, + name=None, + path=None, + share=None, + encoding=None, + **kwargs + ): + self.name = name + self.path = path + self.share = share + self.properties = None + self.size = None + + self._client = client + self._config = config + self._start_range = start_range + self._end_range = end_range + self._max_concurrency = max_concurrency + self._encoding = encoding + self._validate_content = validate_content + self._encryption_options = encryption_options or {} + self._request_options = kwargs + self._location_mode = None + self._download_complete = False + self._current_content = None + self._file_size = None + self._response = None + self._etag = None + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. 
+ self._first_get_size = self._config.max_single_get_size if not self._validate_content \ + else self._config.max_chunk_get_size + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - initial_request_start < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, initial_request_end, self._end_range, self._encryption_options + ) + + def __len__(self): + return self.size + + async def _setup(self): + self._response = await self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.path = self.path + self.properties.share = self.share + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = 'bytes {0}-{1}/{2}'.format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + if self.size == 0: + self._current_content = b"" + else: + self._current_content = await process_content( + self._response, + self._initial_offset[0], + self._initial_offset[1], + self._encryption_options + ) + + async def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content) + + try: + location_mode, response = await self._client.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. + self._location_mode = location_mode + + # Parse the total file size and adjust the download size if ranges + # were specified + self._file_size = parse_length_from_content_range(response.properties.content_range) + if self._end_range is not None: + # Use the length unless it is over the end of the file + self.size = min(self._file_size, self._end_range - (self._start_range or 0) + 1) + elif self._start_range is not None: + self.size = self._file_size - self._start_range + else: + self.size = self._file_size + + except HttpResponseError as error: + if self._start_range is None and error.response.status_code == 416: + # Get range will fail on an empty file. If the user did not + # request a range, do a regular get request in order to get + # any properties. + try: + _, response = await self._client.download( + validate_content=self._validate_content, + data_stream_total=0, + download_stream_current=0, + **self._request_options) + except HttpResponseError as error: + process_storage_error(error) + + # Set the download size to empty + self.size = 0 + self._file_size = 0 + else: + process_storage_error(error) + + # If the file is small, the download is complete at this point. + # If file size is large, download the rest of the file in chunks. 
+ if response.properties.size == self.size:
+ self._download_complete = True
+ self._etag = response.properties.etag
+ return response
+
+ def chunks(self):
+ # type: () -> AsyncIterator[bytes]
+ """Iterate over chunks in the download stream.
+
+ :rtype: AsyncIterator[bytes]
+ """
+ if self.size == 0 or self._download_complete:
+ iter_downloader = None
+ else:
+ data_end = self._file_size
+ if self._end_range is not None:
+ # Use the length unless it is over the end of the file
+ data_end = min(self._file_size, self._end_range + 1)
+ iter_downloader = _AsyncChunkDownloader(
+ client=self._client,
+ total_size=self.size,
+ chunk_size=self._config.max_chunk_get_size,
+ current_progress=self._first_get_size,
+ start_range=self._initial_range[1] + 1, # Start where the first download ended
+ end_range=data_end,
+ stream=None,
+ parallel=False,
+ validate_content=self._validate_content,
+ encryption_options=self._encryption_options,
+ use_location=self._location_mode,
+ etag=self._etag,
+ **self._request_options)
+ return _AsyncChunkIterator(
+ size=self.size,
+ content=self._current_content,
+ downloader=iter_downloader,
+ chunk_size=self._config.max_chunk_get_size
+ )
+
+ async def readall(self):
+ """Download the contents of this file.
+
+ This operation is blocking until all data is downloaded.
+
+ :rtype: bytes or str
+ """
+ stream = BytesIO()
+ await self.readinto(stream)
+ data = stream.getvalue()
+ if self._encoding:
+ return data.decode(self._encoding)
+ return data
+
+ async def content_as_bytes(self, max_concurrency=1):
+ """Download the contents of this file.
+
+ This operation is blocking until all data is downloaded.
+
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :rtype: bytes
+ """
+ warnings.warn(
+ "content_as_bytes is deprecated, use readall instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ return await self.readall()
+
+ async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+ """Download the contents of this file, and decode as text.
+
+ This operation is blocking until all data is downloaded.
+
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :param str encoding:
+ Text encoding to decode the downloaded bytes. Default is UTF-8.
+ :rtype: str
+ """
+ warnings.warn(
+ "content_as_text is deprecated, use readall instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ self._encoding = encoding
+ return await self.readall()
+
+ async def readinto(self, stream):
+ """Download the contents of this file to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream. The stream must be seekable if the download
+ uses more than one parallel connection.
+ :returns: The number of bytes read.
+ :rtype: int
+ """
+ # the stream must be seekable if parallel download is required
+ parallel = self._max_concurrency > 1
+ if parallel:
+ error_message = "Target stream handle must be seekable."
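+ # Prefer the io.IOBase seekable() probe on Python 3; on streams that lack
+ # it, fall back to a no-op seek(stream.tell()) and treat failure as
+ # non-seekable.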
+ if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(error_message) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(error_message) + + # Write the content to the user stream + stream.write(self._current_content) + if self._download_complete: + return self.size + + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + downloader = _AsyncChunkDownloader( + client=self._client, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # start where the first download ended + end_range=data_end, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + use_location=self._location_mode, + etag=self._etag, + **self._request_options) + + dl_tasks = downloader.get_chunk_offsets() + running_futures = [ + asyncio.ensure_future(downloader.process_chunk(d)) + for d in islice(dl_tasks, 0, self._max_concurrency) + ] + while running_futures: + # Wait for some download to finish before adding a new one + _done, running_futures = await asyncio.wait( + running_futures, return_when=asyncio.FIRST_COMPLETED) + try: + next_chunk = next(dl_tasks) + except StopIteration: + break + else: + running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) + + if running_futures: + # Wait for the remaining downloads to finish + await asyncio.wait(running_futures) + return self.size + + async def download_to_stream(self, stream, max_concurrency=1): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The properties of the downloaded file. + :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + await self.readinto(stream) + return self.properties diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_file_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_file_client_async.py new file mode 100644 index 0000000..f4ee50f --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_file_client_async.py @@ -0,0 +1,1205 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, invalid-overridden-method, too-many-public-methods +import functools +import time +from io import BytesIO +from typing import Optional, Union, IO, List, Tuple, Dict, Any, Iterable, TYPE_CHECKING # pylint: disable=unused-import + +import six +from azure.core.async_paging import AsyncItemPaged +from azure.core.exceptions import HttpResponseError + +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from .._parser import _datetime_to_str, _get_file_permission +from .._shared.parser import _str + +from .._generated.aio import AzureFileStorage +from .._generated.models import FileHTTPHeaders +from .._shared.policies_async import ExponentialRetry +from .._shared.uploads_async import upload_data_chunks, FileChunkUploader, IterStreamer +from .._shared.base_client_async import AsyncStorageAccountHostsMixin +from .._shared.request_handlers import add_metadata_headers, get_length +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result +from .._serialize import get_access_conditions, get_smb_properties, get_api_version +from .._file_client import ShareFileClient as ShareFileClientBase +from ._models import HandlesPaged +from ._lease_async import ShareLeaseClient +from ._download_async import StorageStreamDownloader + +if TYPE_CHECKING: + from datetime import datetime + from .._models import ShareProperties, ContentSettings, FileProperties, NTFSAttributes + from .._generated.models import HandleItem + + +async def _upload_file_helper( + client, + stream, + size, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + file_settings, + file_attributes="none", + file_creation_time="now", + file_last_write_time="now", + file_permission=None, + file_permission_key=None, + **kwargs +): + try: + if size is None or size < 0: + raise ValueError("A content size must be specified for a File.") + response = await client.create_file( + size, content_settings=content_settings, metadata=metadata, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + permission_key=file_permission_key, + timeout=timeout, + **kwargs + ) + if size == 0: + return response + + responses = await upload_data_chunks( + service=client, + uploader_class=FileChunkUploader, + total_size=size, + chunk_size=file_settings.max_range_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + timeout=timeout, + **kwargs + ) + return sorted(responses, key=lambda r: r.get('last_modified'))[-1] + except HttpResponseError as error: + process_storage_error(error) + + +class ShareFileClient(AsyncStorageAccountHostsMixin, ShareFileClientBase): + """A client to interact with a specific file, although that file may not yet exist. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the + file, use the :func:`from_file_url` classmethod. + :param share_name: + The name of the share for the file. + :type share_name: str + :param str file_path: + The file path to the file with which to interact. If specified, this value will override + a file value specified in the file URL. 
+ :param str snapshot:
+ An optional file snapshot on which to operate. This can be the snapshot ID string
+ or the response returned from :func:`ShareClient.create_snapshot`.
+ :param credential:
+ The credential with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+ an instance of an AzureSasCredential from azure.core.credentials or an account
+ shared access key.
+ :keyword str api_version:
+ The Storage API version to use for requests. Default value is '2019-07-07'.
+ Setting to an older version may result in reduced feature compatibility.
+
+ .. versionadded:: 12.1.0
+
+ :keyword str secondary_hostname:
+ The hostname of the secondary endpoint.
+ :keyword loop:
+ The event loop to run the asynchronous tasks.
+ :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+ """
+
+ def __init__( # type: ignore
+ self,
+ account_url, # type: str
+ share_name, # type: str
+ file_path, # type: str
+ snapshot=None, # type: Optional[Union[str, Dict[str, Any]]]
+ credential=None, # type: Optional[Any]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs)
+ loop = kwargs.pop('loop', None)
+ super(ShareFileClient, self).__init__(
+ account_url, share_name=share_name, file_path=file_path, snapshot=snapshot,
+ credential=credential, loop=loop, **kwargs
+ )
+ self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop)
+ self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access
+ self._loop = loop
+
+ @distributed_trace_async
+ async def acquire_lease(self, lease_id=None, **kwargs):
+ # type: (Optional[str], **Any) -> ShareLeaseClient
+ """Requests a new lease.
+
+ If the file does not have an active lease, the File
+ Service creates a lease on the file and returns a new lease.
+
+ :param str lease_id:
+ Proposed lease ID, in a GUID string format. The File Service
+ returns 400 (Invalid request) if the proposed lease ID is not
+ in the correct format.
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: A ShareLeaseClient object.
+ :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common.py
+ :start-after: [START acquire_lease_on_blob]
+ :end-before: [END acquire_lease_on_blob]
+ :language: python
+ :dedent: 8
+ :caption: Acquiring a lease on a file.
+ """
+ kwargs['lease_duration'] = -1
+ lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore
+ await lease.acquire(**kwargs)
+ return lease
+
+ @distributed_trace_async
+ async def create_file( # type: ignore
+ self,
+ size, # type: int
+ file_attributes="none", # type: Union[str, NTFSAttributes]
+ file_creation_time="now", # type: Union[str, datetime]
+ file_last_write_time="now", # type: Union[str, datetime]
+ file_permission=None, # type: Optional[str]
+ permission_key=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Dict[str, Any]
+ """Creates a new file.
+
+ Note that it only initializes the file with no content.
+
+ :param int size: Specifies the maximum size for the file,
+ up to 1 TB.
+ :param file_attributes:
+ The file system attributes for files and directories.
+ If not set, the default value would be "None" and the attributes will be set to "Archive".
+ Here is an example for when the var type is str: 'Temporary|Archive'.
+ file_attributes value is not case sensitive. + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START create_file] + :end-before: [END create_file] + :language: python + :dedent: 16 + :caption: Create a file. 
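+
+ A minimal usage sketch (illustrative only; the connection string, share
+ name and file path below are hypothetical):
+
+ .. code-block:: python
+
+ file_client = ShareFileClient.from_connection_string(
+ "my_connection_string", share_name="myshare", file_path="dir/myfile")
+ await file_client.create_file(size=1024)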
+ """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + content_settings = kwargs.pop('content_settings', None) + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + + headers = kwargs.pop("headers", {}) + headers.update(add_metadata_headers(metadata)) + file_http_headers = None + if content_settings: + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition, + ) + file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') + try: + return await self._client.file.create( # type: ignore + file_content_length=size, + metadata=metadata, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_permission=file_permission, + file_permission_key=permission_key, + file_http_headers=file_http_headers, + lease_access_conditions=access_conditions, + headers=headers, + timeout=timeout, + cls=return_response_headers, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_file( + self, data, # type: Any + length=None, # type: Optional[int] + file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="now", # type: Union[str, datetime] + file_last_write_time="now", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Uploads a new file. + + :param Any data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. 
+ :type permission_key: str + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword str encoding: + Defaults to UTF-8. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START upload_file] + :end-before: [END upload_file] + :language: python + :dedent: 16 + :caption: Upload a file. + """ + metadata = kwargs.pop('metadata', None) + content_settings = kwargs.pop('content_settings', None) + max_concurrency = kwargs.pop('max_concurrency', 1) + validate_content = kwargs.pop('validate_content', False) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Encryption not supported.") + + if isinstance(data, six.text_type): + data = data.encode(encoding) + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, "read"): + stream = data + elif hasattr(data, "__iter__"): + stream = IterStreamer(data, encoding=encoding) # type: ignore + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + return await _upload_file_helper( # type: ignore + self, + stream, + length, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + self._config, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + file_permission_key=permission_key, + **kwargs + ) + + @distributed_trace_async + async def start_copy_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Any + """Initiates the copying of data from a source URL into the file + referenced by the client. + + The status of this copy operation can be found using the `get_properties` + method. + + :param str source_url: + Specifies the URL of the source file. + :keyword str file_permission: + If specified the permission (security descriptor) shall be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. 
This setting can be + used if Permission size is <= 8KB, otherwise permission_key shall be used. + If SDDL is specified as input, it must have owner, group and dacl. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword str permission_key: + Key of the permission to be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword file_attributes: + This value can be set to "source" to copy file attributes from the source file to the target file, + or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes + to set on the target file. If this is not set, the default value is "Archive". + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :keyword file_creation_time: + This value can be set to "source" to copy the creation time from the source file to the target file, + or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. + If this is not set, creation time will be set to the date time value of the creation + (or when it was overwritten) of the target file by copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_creation_time: str or ~datetime.datetime + :keyword file_last_write_time: + This value can be set to "source" to copy the last write time from the source file to the target file, or + a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. + If this is not set, value will be the last write time to the file by the copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_last_write_time: str or ~datetime.datetime + :keyword bool ignore_read_only: + Specifies the option to overwrite the target file if it already exists and has read-only attribute set. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword bool set_archive_attribute: + Specifies the option to set the archive attribute on the target file. + True means the archive attribute will be set on the target file despite attribute + overrides or the source file state. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_client_async.py
+ :start-after: [START copy_file_from_url]
+ :end-before: [END copy_file_from_url]
+ :language: python
+ :dedent: 16
+ :caption: Copy a file from a URL
+ """
+ metadata = kwargs.pop('metadata', None)
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ timeout = kwargs.pop('timeout', None)
+ headers = kwargs.pop("headers", {})
+ headers.update(add_metadata_headers(metadata))
+ kwargs.update(get_smb_properties(kwargs))
+ try:
+ return await self._client.file.start_copy(
+ source_url,
+ metadata=metadata,
+ lease_access_conditions=access_conditions,
+ headers=headers,
+ cls=return_response_headers,
+ timeout=timeout,
+ **kwargs
+ )
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def abort_copy(self, copy_id, **kwargs):
+ # type: (Union[str, FileProperties], Any) -> None
+ """Abort an ongoing copy operation.
+
+ This will leave a destination file with zero length and full metadata.
+ This will raise an error if the copy operation has already ended.
+
+ :param copy_id:
+ The copy operation to abort. This can be either an ID, or an
+ instance of FileProperties.
+ :type copy_id: str or ~azure.storage.fileshare.FileProperties
+ :keyword lease:
+ Required if the file has an active lease. Value can be a ShareLeaseClient object
+ or the lease ID as a string.
+
+ .. versionadded:: 12.1.0
+
+ :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ timeout = kwargs.pop('timeout', None)
+ try:
+ copy_id = copy_id.copy.id
+ except AttributeError:
+ try:
+ copy_id = copy_id["copy_id"]
+ except TypeError:
+ pass
+ try:
+ await self._client.file.abort_copy(copy_id=copy_id,
+ lease_access_conditions=access_conditions,
+ timeout=timeout, **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def download_file(
+ self,
+ offset=None, # type: Optional[int]
+ length=None, # type: Optional[int]
+ **kwargs
+ ):
+ # type: (Optional[int], Optional[int], Any) -> StorageStreamDownloader
+ """Downloads a file to the StorageStreamDownloader. The readall() method must
+ be used to read all the content or readinto() must be used to download the file into
+ a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
+
+ :param int offset:
+ Start of byte range to use for downloading a section of the file.
+ Must be set if length is provided.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :keyword int max_concurrency:
+ Maximum number of parallel connections to use.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the file. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ file. Also note that if enabled, the memory-efficient download algorithm
+ will not be used, because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.fileshare.aio.StorageStreamDownloader + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START download_file] + :end-before: [END download_file] + :language: python + :dedent: 16 + :caption: Download a file. + """ + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError("Encryption not supported.") + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + + range_end = None + if length is not None: + range_end = offset + length - 1 # Service actually uses an end-range inclusive index + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + + downloader = StorageStreamDownloader( + client=self._client.file, + config=self._config, + start_range=offset, + end_range=range_end, + encryption_options=None, + name=self.file_name, + path='/'.join(self.file_path), + share=self.share_name, + lease_access_conditions=access_conditions, + cls=deserialize_file_stream, + **kwargs + ) + await downloader._setup() # pylint: disable=protected-access + return downloader + + @distributed_trace_async + async def delete_file(self, **kwargs): + # type: (Any) -> None + """Marks the specified file for deletion. The file is + later deleted during garbage collection. + + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START delete_file] + :end-before: [END delete_file] + :language: python + :dedent: 16 + :caption: Delete a file. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_file_properties(self, **kwargs): + # type: (Any) -> FileProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the file. + + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :returns: FileProperties + :rtype: ~azure.storage.fileshare.FileProperties + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + file_props = await self._client.file.get_properties( + sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, + timeout=timeout, + cls=deserialize_file_properties, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + file_props.name = self.file_name + file_props.share = self.share_name + file_props.snapshot = self.snapshot + file_props.path = "/".join(self.file_path) + return file_props # type: ignore + + @distributed_trace_async + async def set_http_headers(self, content_settings, # type: ContentSettings + file_attributes="preserve", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Union[str, datetime] + file_last_write_time="preserve", # type: Union[str, datetime] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the file. + + :param ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: File-updated property dict (Etag and last modified). 
+ :rtype: dict(str, Any)
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ timeout = kwargs.pop('timeout', None)
+ file_content_length = kwargs.pop("size", None)
+ file_http_headers = FileHTTPHeaders(
+ file_cache_control=content_settings.cache_control,
+ file_content_type=content_settings.content_type,
+ file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None,
+ file_content_encoding=content_settings.content_encoding,
+ file_content_language=content_settings.content_language,
+ file_content_disposition=content_settings.content_disposition,
+ )
+ file_permission = _get_file_permission(file_permission, permission_key, 'preserve')
+ try:
+ return await self._client.file.set_http_headers( # type: ignore
+ file_content_length=file_content_length,
+ file_http_headers=file_http_headers,
+ file_attributes=_str(file_attributes),
+ file_creation_time=_datetime_to_str(file_creation_time),
+ file_last_write_time=_datetime_to_str(file_last_write_time),
+ file_permission=file_permission,
+ file_permission_key=permission_key,
+ lease_access_conditions=access_conditions,
+ timeout=timeout,
+ cls=return_response_headers,
+ **kwargs
+ )
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def set_file_metadata(self, metadata=None, **kwargs): # type: ignore
+ # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any]
+ """Sets user-defined metadata for the specified file as one or more
+ name-value pairs.
+
+ Each call to this operation replaces all existing metadata
+ attached to the file. To remove all metadata from the file,
+ call this operation with no metadata dict.
+
+ :param metadata:
+ Name-value pairs associated with the file as metadata.
+ :type metadata: dict(str, str)
+ :keyword lease:
+ Required if the file has an active lease. Value can be a ShareLeaseClient object
+ or the lease ID as a string.
+
+ .. versionadded:: 12.1.0
+
+ :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: File-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ timeout = kwargs.pop('timeout', None)
+ headers = kwargs.pop("headers", {})
+ headers.update(add_metadata_headers(metadata)) # type: ignore
+ try:
+ return await self._client.file.set_metadata( # type: ignore
+ metadata=metadata, lease_access_conditions=access_conditions,
+ timeout=timeout, cls=return_response_headers, headers=headers, **kwargs
+ )
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def upload_range( # type: ignore
+ self,
+ data, # type: bytes
+ offset, # type: int
+ length, # type: int
+ **kwargs
+ ):
+ # type: (...) -> Dict[str, Any]
+ """Upload a range of bytes to a file.
+
+ :param bytes data:
+ The data to upload.
+ :param int offset:
+ Start of byte range to use for uploading a section of the file.
+ The range can be up to 4 MB in size.
+ :param int length:
+ Number of bytes to use for uploading a section of the file.
+ The range can be up to 4 MB in size.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash of the range content. The storage
+ service checks the hash of the content that has arrived
+ with the hash that was sent.
This is primarily valuable for detecting
+ bitflips on the wire if using http instead of https as https (the default)
+ will already validate. Note that this MD5 hash is not stored with the
+ file.
+ :keyword lease:
+ Required if the file has an active lease. Value can be a ShareLeaseClient object
+ or the lease ID as a string.
+
+ .. versionadded:: 12.1.0
+
+ :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword str encoding:
+ Defaults to UTF-8.
+ :returns: File-updated property dict (Etag and last modified).
+ :rtype: Dict[str, Any]
+ """
+ validate_content = kwargs.pop('validate_content', False)
+ timeout = kwargs.pop('timeout', None)
+ encoding = kwargs.pop('encoding', 'UTF-8')
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError("Encryption not supported.")
+ if isinstance(data, six.text_type):
+ data = data.encode(encoding)
+ end_range = offset + length - 1 # Reformat to an inclusive range index
+ content_range = 'bytes={0}-{1}'.format(offset, end_range)
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ try:
+ return await self._client.file.upload_range( # type: ignore
+ range=content_range,
+ content_length=length,
+ optionalbody=data,
+ timeout=timeout,
+ validate_content=validate_content,
+ lease_access_conditions=access_conditions,
+ cls=return_response_headers,
+ **kwargs
+ )
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def upload_range_from_url(self, source_url,
+ offset,
+ length,
+ source_offset,
+ **kwargs
+ ):
+ # type: (str, int, int, int, **Any) -> Dict[str, Any]
+ """
+ Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint.
+
+ :param int offset:
+ Start of byte range to use for updating a section of the file.
+ The range can be up to 4 MB in size.
+ :param int length:
+ Number of bytes to use for updating a section of the file.
+ The range can be up to 4 MB in size.
+ :param str source_url:
+ A URL of up to 2 KB in length that specifies an Azure file or blob.
+ The value should be URL-encoded as it would appear in a request URI.
+ If the source is in another account, the source must either be public
+ or must be authenticated via a shared access signature. If the source
+ is public, no authentication is required.
+ Examples:
+ https://myaccount.file.core.windows.net/myshare/mydir/myfile
+ https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken
+ :param int source_offset:
+ This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+ The service will read the same number of bytes as the destination range (length bytes).
+ :keyword lease:
+ Required if the file has an active lease. Value can be a ShareLeaseClient object
+ or the lease ID as a string.
+
+ .. versionadded:: 12.1.0
+
+ :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :keyword str source_authorization:
+ Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+ the prefix of the source_authorization string.
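+
+ A minimal usage sketch (illustrative only; the source URL and offsets are
+ hypothetical, and an existing ShareFileClient named file_client is assumed):
+
+ .. code-block:: python
+
+ # copy bytes 0-511 of the source file into bytes 1024-1535 of this file
+ await file_client.upload_range_from_url(
+ "https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken",
+ offset=1024, length=512, source_offset=0)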
+ """ + options = self._upload_range_from_url_options( + source_url=source_url, + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return await self._client.file.upload_range_from_url(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_ranges( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List[Dict[str, int]] + """Returns the list of valid page ranges for a file or snapshot + of a file. + + :param int offset: + Specifies the start offset of bytes over which to get ranges. + :param int length: + Number of bytes to use over which to get ranges. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + A list of valid ranges. + :rtype: List[dict[str, int]] + """ + options = self._get_ranges_options( + offset=offset, + length=length, + **kwargs) + try: + ranges = await self._client.file.get_range_list(**options) + except HttpResponseError as error: + process_storage_error(error) + return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] + + @distributed_trace_async + async def get_ranges_diff( # type: ignore + self, + previous_sharesnapshot, # type: Union[str, Dict[str, Any]] + offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + """Returns the list of valid page ranges for a file or snapshot + of a file. + + .. versionadded:: 12.6.0 + + :param int offset: + Specifies the start offset of bytes over which to get ranges. + :param int length: + Number of bytes to use over which to get ranges. + :param str previous_sharesnapshot: + The snapshot diff parameter that contains an opaque DateTime value that + specifies a previous file snapshot to be compared + against a more recent snapshot or the current file. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys. + The first element are filled file ranges, the 2nd element is cleared file ranges. + :rtype: tuple(list(dict(str, str), list(dict(str, str)) + """ + options = self._get_ranges_options( + offset=offset, + length=length, + previous_sharesnapshot=previous_sharesnapshot, + **kwargs) + try: + ranges = await self._client.file.get_range_list(**options) + except HttpResponseError as error: + process_storage_error(error) + return get_file_ranges_result(ranges) + + @distributed_trace_async + async def clear_range( # type: ignore + self, + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Clears the specified range and releases the space used in storage for + that range. + + :param int offset: + Start of byte range to use for clearing a section of the file. + The range can be up to 4 MB in size. 
+ :param int length:
+ Number of bytes to use for clearing a section of the file.
+ The range can be up to 4 MB in size.
+ :keyword lease:
+ Required if the file has an active lease. Value can be a ShareLeaseClient object
+ or the lease ID as a string.
+
+ .. versionadded:: 12.1.0
+
+ :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: File-updated property dict (Etag and last modified).
+ :rtype: Dict[str, Any]
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ timeout = kwargs.pop('timeout', None)
+ if self.require_encryption or (self.key_encryption_key is not None):
+ raise ValueError("Unsupported method for encryption.")
+
+ if offset is None or offset % 512 != 0:
+ raise ValueError("offset must be an integer that aligns to a 512-byte boundary")
+ if length is None or length % 512 != 0:
+ raise ValueError("length must be an integer that aligns to a 512-byte boundary")
+ end_range = length + offset - 1 # Reformat to an inclusive range index
+ content_range = "bytes={0}-{1}".format(offset, end_range)
+ try:
+ return await self._client.file.upload_range( # type: ignore
+ timeout=timeout,
+ cls=return_response_headers,
+ content_length=0,
+ optionalbody=None,
+ file_range_write="clear",
+ range=content_range,
+ lease_access_conditions=access_conditions,
+ **kwargs
+ )
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def resize_file(self, size, **kwargs):
+ # type: (int, Any) -> Dict[str, Any]
+ """Resizes a file to the specified size.
+
+ :param int size:
+ Size to resize file to (in bytes)
+ :keyword lease:
+ Required if the file has an active lease. Value can be a ShareLeaseClient object
+ or the lease ID as a string.
+
+ .. versionadded:: 12.1.0
+
+ :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: File-updated property dict (Etag and last modified).
+ :rtype: Dict[str, Any]
+ """
+ access_conditions = get_access_conditions(kwargs.pop('lease', None))
+ timeout = kwargs.pop('timeout', None)
+ try:
+ return await self._client.file.set_http_headers( # type: ignore
+ file_content_length=size,
+ file_attributes="preserve",
+ file_creation_time="preserve",
+ file_last_write_time="preserve",
+ file_permission="preserve",
+ lease_access_conditions=access_conditions,
+ cls=return_response_headers,
+ timeout=timeout,
+ **kwargs
+ )
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace
+ def list_handles(self, **kwargs):
+ # type: (Any) -> AsyncItemPaged
+ """Lists handles for file.
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :returns: An auto-paging iterable of HandleItem
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.HandleItem]
+ """
+ timeout = kwargs.pop('timeout', None)
+ results_per_page = kwargs.pop("results_per_page", None)
+ command = functools.partial(
+ self._client.file.list_handles,
+ sharesnapshot=self.snapshot,
+ timeout=timeout,
+ **kwargs)
+ return AsyncItemPaged(
+ command, results_per_page=results_per_page,
+ page_iterator_class=HandlesPaged)
+
+ @distributed_trace_async
+ async def close_handle(self, handle, **kwargs):
+ # type: (Union[str, HandleItem], Any) -> Dict[str, int]
+ """Close an open file handle.
+
+ :param handle:
+ A specific handle to close.
+ :type handle: str or ~azure.storage.fileshare.Handle + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + try: + handle_id = handle.id # type: ignore + except AttributeError: + handle_id = handle + if handle_id == '*': + raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") + try: + response = await self._client.file.force_close_handles( + handle_id, + marker=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def close_all_handles(self, **kwargs): + # type: (Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: + The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles failed to close in a dict. + :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = await self._client.file.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_lease_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_lease_async.py new file mode 100644 index 0000000..0d99845 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_lease_async.py @@ -0,0 +1,228 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple,
+ TypeVar, TYPE_CHECKING
+)
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from .._shared.response_handlers import return_response_headers, process_storage_error
+from .._generated.aio.operations import FileOperations, ShareOperations
+from .._lease import ShareLeaseClient as LeaseClientBase
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ ShareFileClient = TypeVar("ShareFileClient")
+ ShareClient = TypeVar("ShareClient")
+
+
+class ShareLeaseClient(LeaseClientBase):
+ """Creates a new ShareLeaseClient.
+
+ This client provides lease operations on a ShareClient or ShareFileClient.
+
+ :ivar str id:
+ The ID of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired.
+ :ivar str etag:
+ The ETag of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired or modified.
+ :ivar ~datetime.datetime last_modified:
+ The last modified timestamp of the lease currently being maintained.
+ This will be `None` if no lease has yet been acquired or modified.
+
+ :param client:
+ The client of the file or share to lease.
+ :type client: ~azure.storage.fileshare.ShareFileClient or
+ ~azure.storage.fileshare.ShareClient
+ :param str lease_id:
+ A string representing the lease ID of an existing lease. This value does not
+ need to be specified in order to acquire a new lease, or break one.
+ """
+
+ def __enter__(self):
+ raise TypeError("Async lease must use 'async with'.")
+
+ def __exit__(self, *args):
+ self.release()
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, *args):
+ await self.release()
+
+ @distributed_trace_async
+ async def acquire(self, **kwargs):
+ # type: (**Any) -> None
+ """Requests a new lease. This operation establishes and manages a lock on a
+ file or share for write and delete operations. If the file or share does not have an active lease,
+ the File or Share service creates a lease on the file or share and returns a new lease ID. If the
+ file has an active lease, you can only request a new lease using the active lease ID.
+
+ :keyword int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be
+ between 15 and 60 seconds. A share lease duration cannot be changed
+ using renew or change. Default is -1 (infinite share lease).
+
+ :keyword int timeout:
+ The timeout parameter is expressed in seconds.
+ :rtype: None + """ + try: + lease_duration = kwargs.pop('lease_duration', -1) + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = await self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace_async + async def renew(self, **kwargs): + # type: (Any) -> None + """Renews the share lease. + + The share lease can be renewed if the lease ID specified in the + lease client matches that associated with the share. Note that + the lease may be renewed even if it has expired as long as the share + has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + .. versionadded:: 12.6.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + if isinstance(self._client, FileOperations): + raise TypeError("Lease renewal operations are only valid for ShareClient.") + try: + response = await self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + sharesnapshot=self._snapshot, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def release(self, **kwargs): + # type: (Any) -> None + """Releases the lease. The lease may be released if the lease ID specified on the request matches + that associated with the share or file. Releasing the lease allows another client to immediately acquire + the lease for the share or file as soon as the release is complete. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: None + """ + try: + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = await self._client.release_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and + a new lease ID in x-ms-proposed-lease-id. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The File or Share service raises an error + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword int timeout: + The timeout parameter is expressed in seconds. 
+ :return: None + """ + try: + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = await self._client.change_lease( + lease_id=self.id, + proposed_lease_id=proposed_lease_id, + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def break_lease(self, **kwargs): + # type: (Any) -> int + """Force breaks the lease if the file or share has an active lease. An infinite lease breaks immediately. + + Once a lease is broken, it cannot be changed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :keyword int lease_break_period: + This is the proposed duration of seconds that the share lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the share lease. If longer, the time remaining on the share lease is used. + A new share lease will not be available before the break period has + expired, but the share lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration share lease breaks after the remaining share lease + period elapses, and an infinite share lease breaks immediately. + + .. versionadded:: 12.5.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. + :rtype: int + """ + try: + lease_break_period = kwargs.pop('lease_break_period', None) + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + if isinstance(self._client, ShareOperations): + kwargs['break_period'] = lease_break_period + if isinstance(self._client, FileOperations) and lease_break_period: + raise TypeError("Setting a lease break period is only applicable to Share leases.") + + response = await self._client.break_lease( + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return response.get('lease_time') # type: ignore diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_models.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_models.py new file mode 100644 index 0000000..e81133c --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_models.py @@ -0,0 +1,178 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines + +from azure.core.async_paging import AsyncPageIterator +from azure.core.exceptions import HttpResponseError + +from .._shared.response_handlers import return_context_and_deserialized, process_storage_error +from .._generated.models import DirectoryItem +from .._models import Handle, ShareProperties, DirectoryProperties, FileProperties + + +def _wrap_item(item): + if isinstance(item, DirectoryItem): + return {'name': item.name, 'is_directory': True} + return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} + + +class SharePropertiesPaged(AsyncPageIterator): + """An iterable of Share properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A file name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.fileshare.ShareProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only shares whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of share names to retrieve per + call. + :param str continuation_token: An opaque continuation token. + """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(SharePropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access + return self._response.next_marker or None, self.current_page + + +class HandlesPaged(AsyncPageIterator): + """An iterable of Handles. + + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.fileshare.Handle) + + :param callable command: Function to retrieve the next page of items. + :param int results_per_page: The maximum number of handles to retrieve per + call. + :param str continuation_token: An opaque continuation token. + """ + def __init__(self, command, results_per_page=None, continuation_token=None): + super(HandlesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access + return self._response.next_marker or None, self.current_page + + +class DirectoryPropertiesPaged(AsyncPageIterator): + """An iterable for the contents of a directory. + + This iterable will yield dict-like DirectoryProperties and FileProperties + objects for the contents of the directory. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A file name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.fileshare.DirectoryProperties or ~azure.storage.fileshare.FileProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only directories whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of entries to retrieve per + call. + :param str continuation_token: An opaque continuation token.
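+
+ A minimal consumption sketch (illustrative; this pager is normally obtained from a list operation such as ``ShareDirectoryClient.list_directories_and_files`` rather than constructed directly, and ``directory_client`` is an assumed name):
+
+ .. code-block:: python
+
+     # Iterate the listing lazily; continuation tokens are followed automatically.
+     async for entry in directory_client.list_directories_and_files():
+         print(entry.name)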
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(DirectoryPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + prefix=self.prefix, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [DirectoryProperties._from_generated(i) for i in self._response.segment.directory_items] # pylint: disable = protected-access + self.current_page.extend([FileProperties._from_generated(i) for i in self._response.segment.file_items]) # pylint: disable = protected-access + return self._response.next_marker or None, self.current_page diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_client_async.py new file mode 100644 index 0000000..f8ded64 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_client_async.py @@ -0,0 +1,756 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +from typing import ( # pylint: disable=unused-import + Optional, Union, Dict, Any, Iterable, TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.pipeline import AsyncPipeline +from .._shared.policies_async import ExponentialRetry +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.request_handlers import add_metadata_headers, serialize_iso +from .._shared.response_handlers import ( + return_response_headers, + process_storage_error, + return_headers_and_deserialized) +from .._generated.aio import AzureFileStorage +from .._generated.models import ( + SignedIdentifier, + DeleteSnapshotsOptionType) +from .._deserialize import deserialize_share_properties, deserialize_permission +from .._serialize import get_api_version, get_access_conditions +from .._share_client import ShareClient as ShareClientBase +from ._directory_client_async import ShareDirectoryClient +from ._file_client_async import ShareFileClient +from ..aio._lease_async import ShareLeaseClient +from .._models import ShareProtocols + +if TYPE_CHECKING: + from .._models import ShareProperties, AccessPolicy + + +class ShareClient(AsyncStorageAccountHostsMixin, ShareClientBase): + """A client to interact with a specific share, although that share may not yet exist. + + For operations relating to a specific directory or file in this share, the clients for + those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the share, + use the :func:`from_share_url` classmethod. + :param share_name: + The name of the share with which to interact. + :type share_name: str + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of an AzureSasCredential from azure.core.credentials or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent + service version that is compatible with the current SDK. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword loop: + The event loop to run the asynchronous tasks. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( # type: ignore + self, account_url, # type: str + share_name, # type: str + snapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...)
-> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + loop = kwargs.pop('loop', None) + super(ShareClient, self).__init__( + account_url, + share_name=share_name, + snapshot=snapshot, + credential=credential, + loop=loop, + **kwargs) + self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + self._loop = loop + + def get_directory_client(self, directory_path=None): + # type: (Optional[str]) -> ShareDirectoryClient + """Get a client to interact with the specified directory. + The directory need not already exist. + + :param str directory_path: + Path to the specified directory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + """ + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) + + def get_file_client(self, file_path): + # type: (str) -> ShareFileClient + """Get a client to interact with the specified file. + The file need not already exist. + + :param str file_path: + Path to the specified file. + :returns: A File Client. + :rtype: ~azure.storage.fileshare.aio.ShareFileClient + """ + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareFileClient( + self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) + + @distributed_trace_async() + async def acquire_lease(self, **kwargs): + # type: (**Any) -> ShareLeaseClient + """Requests a new lease. + + If the share does not have an active lease, the Share + Service creates a lease on the share and returns a new lease. + + .. versionadded:: 12.5.0 + + :keyword int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword str lease_id: + Proposed lease ID, in a GUID string format. The Share Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A ShareLeaseClient object. + :rtype: ~azure.storage.fileshare.ShareLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START acquire_lease_on_share] + :end-before: [END acquire_lease_on_share] + :language: python + :dedent: 8 + :caption: Acquiring a lease on a share. 
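+
+ An equivalent minimal inline sketch (illustrative only; ``share_client`` is an assumed, already-constructed ``ShareClient``):
+
+ .. code-block:: python
+
+     # Acquire a 60-second share lease and release it when done.
+     lease = await share_client.acquire_lease(lease_duration=60)
+     await lease.release()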
+ """ + kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) + lease_id = kwargs.pop('lease_id', None) + lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore + await lease.acquire(**kwargs) + return lease + + @distributed_trace_async + async def create_share(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new Share under the account. If a share with the + same name already exists, the operation fails. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int quota: + The quota to be allotted. + :keyword access_tier: + Specifies the access tier of the share. + Possible values: 'TransactionOptimized', 'Hot', 'Cool' + :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + + .. versionadded:: 12.4.0 + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword protocols: + Protocols to enable on the share. Only one protocol can be enabled on the share. + :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols + :keyword root_squash: + Root squash to set on the share. + Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. + :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START create_share] + :end-before: [END create_share] + :language: python + :dedent: 12 + :caption: Creates a file share. + """ + metadata = kwargs.pop('metadata', None) + quota = kwargs.pop('quota', None) + access_tier = kwargs.pop('access_tier', None) + timeout = kwargs.pop('timeout', None) + root_squash = kwargs.pop('root_squash', None) + protocols = kwargs.pop('protocols', None) + if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]: + raise ValueError("The enabled protocol must be set to either SMB or NFS.") + if root_squash and protocols not in ['NFS', ShareProtocols.NFS]: + raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.") + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + + try: + return await self._client.share.create( # type: ignore + timeout=timeout, + metadata=metadata, + quota=quota, + access_tier=access_tier, + root_squash=root_squash, + enabled_protocols=protocols, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def create_snapshot( # type: ignore + self, + **kwargs # type: Optional[Any] + ): + # type: (...) -> Dict[str, Any] + """Creates a snapshot of the share. + + A snapshot is a read-only version of a share that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a share as it appears at a moment in time. + + A snapshot of a share has the same name as the base share from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START create_share_snapshot] + :end-before: [END create_share_snapshot] + :language: python + :dedent: 16 + :caption: Creates a snapshot of the file share. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return await self._client.share.create_snapshot( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def delete_share( + self, delete_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> None + """Marks the specified share for deletion. The share is + later deleted during garbage collection. + + :param bool delete_snapshots: + Indicates if snapshots are to be deleted. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START delete_share] + :end-before: [END delete_share] + :language: python + :dedent: 16 + :caption: Deletes the share and any snapshots. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + delete_include = None + if delete_snapshots: + delete_include = DeleteSnapshotsOptionType.include + try: + await self._client.share.delete( + timeout=timeout, + sharesnapshot=self.snapshot, + delete_snapshots=delete_include, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_share_properties(self, **kwargs): + # type: (Any) -> ShareProperties + """Returns all user-defined metadata and system properties for the + specified share. The data returned does not include the share's + list of files or directories. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: The share properties. + :rtype: ~azure.storage.fileshare.ShareProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_hello_world_async.py + :start-after: [START get_share_properties] + :end-before: [END get_share_properties] + :language: python + :dedent: 16 + :caption: Gets the share properties. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + props = await self._client.share.get_properties( + timeout=timeout, + sharesnapshot=self.snapshot, + cls=deserialize_share_properties, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + props.name = self.share_name + props.snapshot = self.snapshot + return props # type: ignore + + @distributed_trace_async + async def set_share_quota(self, quota, **kwargs): + # type: (int, Any) -> Dict[str, Any] + """Sets the quota for the share.
+ + :param int quota: + Specifies the maximum size of the share, in gigabytes. + Must be greater than 0, and less than or equal to 5TB. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START set_share_quota] + :end-before: [END set_share_quota] + :language: python + :dedent: 16 + :caption: Sets the share quota. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + return await self._client.share.set_properties( # type: ignore + timeout=timeout, + quota=quota, + access_tier=None, + cls=return_response_headers, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + async def set_share_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Sets the share properties. + + .. versionadded:: 12.3.0 + + :keyword access_tier: + Specifies the access tier of the share. + Possible values: 'TransactionOptimized', 'Hot', and 'Cool' + :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + :keyword int quota: + Specifies the maximum size of the share, in gigabytes. + Must be greater than 0, and less than or equal to 5TB. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword root_squash: + Root squash to set on the share. + Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash' + :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START set_share_properties] + :end-before: [END set_share_properties] + :language: python + :dedent: 16 + :caption: Sets the share properties. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + access_tier = kwargs.pop('access_tier', None) + quota = kwargs.pop('quota', None) + root_squash = kwargs.pop('root_squash', None) + if all(parameter is None for parameter in [access_tier, quota, root_squash]): + raise ValueError("set_share_properties should be called with at least one parameter.") + try: + return await self._client.share.set_properties( # type: ignore + timeout=timeout, + quota=quota, + access_tier=access_tier, + root_squash=root_squash, + lease_access_conditions=access_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_share_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the share. + + Each call to this operation replaces all existing metadata + attached to the share. To remove all metadata from the share, + call this operation with no metadata dict. 
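+
+ For example (a minimal sketch; ``share_client`` is an assumed, already-constructed ``ShareClient``):
+
+ .. code-block:: python
+
+     # Replace any existing metadata with a single name-value pair.
+     await share_client.set_share_metadata({'category': 'archive'})
+
+     # Clear all metadata by passing an empty dict.
+     await share_client.set_share_metadata({})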
+ + :param metadata: + Name-value pairs associated with the share as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START set_share_metadata] + :end-before: [END set_share_metadata] + :language: python + :dedent: 16 + :caption: Sets the share metadata. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return await self._client.share.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_share_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the share. The permissions + indicate whether files in a share may be accessed publicly. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = await self._client.share.get_access_policy( + timeout=timeout, + cls=return_headers_and_deserialized, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return { + 'public_access': response.get('share_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace_async + async def set_share_access_policy(self, signed_identifiers, **kwargs): + # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] + """Sets the permissions for the share, or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether files in a share may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the share. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). 
+ :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) + signed_identifiers = identifiers # type: ignore + + try: + return await self._client.share.set_access_policy( # type: ignore + share_acl=signed_identifiers or None, + timeout=timeout, + cls=return_response_headers, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_share_stats(self, **kwargs): + # type: (Any) -> int + """Gets the approximate size of the data stored on the share in bytes. + + Note that this value may not include all recently created + or recently re-sized files. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :return: The approximate size of the data (in bytes) stored on the share. + :rtype: int + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + stats = await self._client.share.get_statistics( + timeout=timeout, + lease_access_conditions=access_conditions, + **kwargs) + return stats.share_usage_bytes # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files( # type: ignore + self, directory_name=None, # type: Optional[str] + name_starts_with=None, # type: Optional[str] + marker=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Iterable[Dict[str,str]] + """Lists the directories and files under the share. + + :param str directory_name: + Name of a directory. + :param str name_starts_with: + Filters the results to return only directories whose names + begin with the specified prefix. + :param str marker: + An opaque continuation token. This value can be retrieved from the + next_marker field of a previous generator object. If specified, + this generator will begin returning results from this point. + :keyword list[str] include: + Include this parameter to specify one or more datasets to include in the response. + Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword bool include_extended_info: + If this is set to true, file id will be returned in listed results. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START share_list_files_in_dir] + :end-before: [END share_list_files_in_dir] + :language: python + :dedent: 16 + :caption: List directories and files in the share. + """ + timeout = kwargs.pop('timeout', None) + directory = self.get_directory_client(directory_name) + return directory.list_directories_and_files( + name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) + + @distributed_trace_async + async def create_permission_for_share(self, file_permission, # type: str + **kwargs # type: Any + ): + # type: (...) -> str + """Create a permission (a security descriptor) at the share level. + + This 'permission' can be used for the files/directories in the share. + If a matching 'permission' already exists, its key is returned; otherwise, a new + permission is created at the share level and its key is returned. + + :param str file_permission: + File permission, a portable SDDL string + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A file permission key + :rtype: str + """ + timeout = kwargs.pop('timeout', None) + options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) + try: + return await self._client.share.create_permission(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_permission_for_share( # type: ignore + self, permission_key, # type: str + **kwargs # type: Any + ): + # type: (...) -> str + """Get a permission (a security descriptor) for a given key. + + This 'permission' can be used for the files/directories in the share. + + :param str permission_key: + Key of the file permission to retrieve + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A file permission (a portable SDDL) + :rtype: str + """ + timeout = kwargs.pop('timeout', None) + try: + return await self._client.share.get_permission( # type: ignore + file_permission_key=permission_key, + cls=deserialize_permission, + timeout=timeout, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def create_directory(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Creates a directory in the share and returns a client to interact + with the directory. + + :param str directory_name: + The name of the directory. + :keyword dict(str,str) metadata: + Name-value pairs associated with the directory as metadata. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + """ + directory = self.get_directory_client(directory_name) + kwargs.setdefault('merge_span', True) + await directory.create_directory(**kwargs) + return directory # type: ignore + + @distributed_trace_async + async def delete_directory(self, directory_name, **kwargs): + # type: (str, Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :param str directory_name: + The name of the directory. + :keyword int timeout: + The timeout parameter is expressed in seconds.
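+ A minimal create-then-delete sketch (illustrative; ``share_client`` is an assumed, already-constructed ``ShareClient``):
+
+ .. code-block:: python
+
+     # Create a directory in the share, then mark it for deletion.
+     directory_client = await share_client.create_directory("mydir")
+     await share_client.delete_directory("mydir")
+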
+ :rtype: None + """ + directory = self.get_directory_client(directory_name) + await directory.delete_directory(**kwargs) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_service_client_async.py b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_service_client_async.py new file mode 100644 index 0000000..dc9ef08 --- /dev/null +++ b/azure/multiapi/storagev2/fileshare/v2020_10_02/aio/_share_service_client_async.py @@ -0,0 +1,369 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, + TYPE_CHECKING +) + +from azure.core.async_paging import AsyncItemPaged +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import AsyncPipeline +from azure.core.tracing.decorator_async import distributed_trace_async + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.response_handlers import process_storage_error +from .._shared.policies_async import ExponentialRetry +from .._generated.aio import AzureFileStorage +from .._generated.models import StorageServiceProperties +from .._share_service_client import ShareServiceClient as ShareServiceClientBase +from .._serialize import get_api_version +from ._share_client_async import ShareClient +from ._models import SharePropertiesPaged +from .._models import service_properties_deserialize + +if TYPE_CHECKING: + from datetime import datetime + from .._shared.models import ResourceTypes, AccountSasPermissions + from .._models import ( + ShareProperties, + Metrics, + CorsRule, + ShareProtocolSettings, + ) + + +class ShareServiceClient(AsyncStorageAccountHostsMixin, ShareServiceClientBase): + """A client to interact with the File Share Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete shares within the account. + For operations relating to a specific share, a client for that entity + can also be retrieved using the :func:`get_share_client` function. + + :param str account_url: + The URL to the file share storage account. Any other entities included + in the URL path (e.g. share or file) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credential with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of an AzureSasCredential from azure.core.credentials or an account + shared access key. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent + service version that is compatible with the current SDK. + Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword loop: + The event loop to run the asynchronous tasks. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + + .. admonition:: Example: + + ..
literalinclude:: ../samples/file_samples_authentication_async.py + :start-after: [START create_share_service_client] + :end-before: [END create_share_service_client] + :language: python + :dedent: 8 + :caption: Create the share service client with url and credential. + """ + def __init__( + self, account_url, # type: str + credential=None, # type: Optional[Any] + **kwargs # type: Any + ): + # type: (...) -> None + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + loop = kwargs.pop('loop', None) + super(ShareServiceClient, self).__init__( + account_url, + credential=credential, + loop=loop, + **kwargs) + self._client = AzureFileStorage(url=self.url, pipeline=self._pipeline, loop=loop) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + self._loop = loop + + @distributed_trace_async + async def get_service_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the properties of a storage account's File Share service, including + Azure Storage Analytics. + + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: A dictionary containing file service properties such as + analytics logging, hour/minute metrics, cors rules, etc. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START get_service_properties] + :end-before: [END get_service_properties] + :language: python + :dedent: 12 + :caption: Get file share service properties. + """ + timeout = kwargs.pop('timeout', None) + try: + service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_service_properties( + self, hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + protocol=None, # type: Optional[ShareProtocolSettings] + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's File Share service, including + Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the + existing settings on the service for that functionality are preserved. + + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for files. + :type hour_metrics: ~azure.storage.fileshare.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for files. + :type minute_metrics: ~azure.storage.fileshare.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) + :param protocol: + Sets protocol settings. + :type protocol: ~azure.storage.fileshare.ShareProtocolSettings + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START set_service_properties] + :end-before: [END set_service_properties] + :language: python + :dedent: 8 + :caption: Sets file share service properties.
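+
+ An inline alternative (a minimal sketch; ``file_service`` is an assumed, already-constructed ``ShareServiceClient``); note that elements passed as None are preserved:
+
+ .. code-block:: python
+
+     # Delete all CORS rules while leaving the metrics settings untouched.
+     await file_service.set_service_properties(cors=[])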
+ """ + timeout = kwargs.pop('timeout', None) + props = StorageServiceProperties( + hour_metrics=hour_metrics, + minute_metrics=minute_metrics, + cors=cors, + protocol=protocol + ) + try: + await self._client.service.set_properties(props, timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_shares( + self, name_starts_with=None, # type: Optional[str] + include_metadata=False, # type: Optional[bool] + include_snapshots=False, # type: Optional[bool] + **kwargs # type: Any + ): # type: (...) -> AsyncItemPaged + """Returns auto-paging iterable of dict-like ShareProperties under the specified account. + The generator will lazily follow the continuation tokens returned by + the service and stop when all shares have been returned. + + :param str name_starts_with: + Filters the results to return only shares whose names + begin with the specified name_starts_with. + :param bool include_metadata: + Specifies that share metadata be returned in the response. + :param bool include_snapshots: + Specifies that share snapshot be returned in the response. + :keyword bool include_deleted: + Specifies that deleted shares be returned in the response. + This is only for share soft delete enabled account. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :returns: An iterable (auto-paging) of ShareProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START fsc_list_shares] + :end-before: [END fsc_list_shares] + :language: python + :dedent: 16 + :caption: List shares in the file share service. + """ + timeout = kwargs.pop('timeout', None) + include = [] + if include_metadata: + include.append('metadata') + if include_snapshots: + include.append('snapshots') + include_deleted = kwargs.pop('include_deleted', None) + if include_deleted: + include.append("deleted") + + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.service.list_shares_segment, + include=include, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=SharePropertiesPaged) + + @distributed_trace_async + async def create_share( + self, share_name, # type: str + **kwargs + ): + # type: (...) -> ShareClient + """Creates a new share under the specified account. If the share + with the same name already exists, the operation fails. Returns a client with + which to interact with the newly created share. + + :param str share_name: The name of the share to create. + :keyword dict(str,str) metadata: + A dict with name_value pairs to associate with the + share as metadata. Example:{'Category':'test'} + :keyword int quota: + Quota in bytes. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.fileshare.aio.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START fsc_create_shares] + :end-before: [END fsc_create_shares] + :language: python + :dedent: 12 + :caption: Create a share in the file share service. 
+ """ + metadata = kwargs.pop('metadata', None) + quota = kwargs.pop('quota', None) + timeout = kwargs.pop('timeout', None) + share = self.get_share_client(share_name) + kwargs.setdefault('merge_span', True) + await share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs) + return share + + @distributed_trace_async + async def delete_share( + self, share_name, # type: Union[ShareProperties, str] + delete_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> None + """Marks the specified share for deletion. The share is + later deleted during garbage collection. + + :param share_name: + The share to delete. This can either be the name of the share, + or an instance of ShareProperties. + :type share_name: str or ~azure.storage.fileshare.ShareProperties + :param bool delete_snapshots: + Indicates if snapshots are to be deleted. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START fsc_delete_shares] + :end-before: [END fsc_delete_shares] + :language: python + :dedent: 16 + :caption: Delete a share in the file share service. + """ + timeout = kwargs.pop('timeout', None) + share = self.get_share_client(share_name) + kwargs.setdefault('merge_span', True) + await share.delete_share( + delete_snapshots=delete_snapshots, timeout=timeout, **kwargs) + + @distributed_trace_async + async def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs): + # type: (str, str, **Any) -> ShareClient + """Restores soft-deleted share. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + .. versionadded:: 12.2.0 + This operation was introduced in API version '2019-12-12'. + + :param str deleted_share_name: + Specifies the name of the deleted share to restore. + :param str deleted_share_version: + Specifies the version of the deleted share to restore. + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: ~azure.storage.fileshare.aio.ShareClient + """ + share = self.get_share_client(deleted_share_name) + try: + await share._client.share.restore(deleted_share_name=deleted_share_name, # pylint: disable = protected-access + deleted_share_version=deleted_share_version, + timeout=kwargs.pop('timeout', None), **kwargs) + return share + except HttpResponseError as error: + process_storage_error(error) + + def get_share_client(self, share, snapshot=None): + # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient + """Get a client to interact with the specified share. + The share need not already exist. + + :param share: + The share. This can either be the name of the share, + or an instance of ShareProperties. + :type share: str or ~azure.storage.fileshare.ShareProperties + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :returns: A ShareClient. + :rtype: ~azure.storage.fileshare.aio.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START get_share_client] + :end-before: [END get_share_client] + :language: python + :dedent: 8 + :caption: Gets the share client. 
+ """ + try: + share_name = share.name + except AttributeError: + share_name = share + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareClient( + self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, + api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, loop=self._loop) diff --git a/azure/multiapi/storagev2/fileshare/v2020_10_02/py.typed b/azure/multiapi/storagev2/fileshare/v2020_10_02/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_deserialize.py b/azure/multiapi/storagev2/queue/v2018_03_28/_deserialize.py index 893e255..7eda92e 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_deserialize.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_deserialize.py @@ -12,7 +12,7 @@ def deserialize_metadata(response, obj, headers): - raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} return {k[10:]: v for k, v in raw_metadata.items()} @@ -26,6 +26,7 @@ def deserialize_queue_properties(response, obj, headers): def deserialize_queue_creation(response, obj, headers): + response = response.http_response if response.status_code == 204: error_code = StorageErrorCode.queue_already_exists error = ResourceExistsError( diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/__init__.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/__init__.py index 1ee3b75..2519a66 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/__init__.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/__init__.py @@ -1,18 +1,16 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from ._azure_queue_storage import AzureQueueStorage __all__ = ['AzureQueueStorage'] -from .version import VERSION - -__version__ = VERSION - +try: + from ._patch import patch_sdk # type: ignore + patch_sdk() +except ImportError: + pass diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/_azure_queue_storage.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/_azure_queue_storage.py index e441ecb..1248b58 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/_azure_queue_storage.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/_azure_queue_storage.py @@ -1,19 +1,21 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. 
See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING + from azure.core import PipelineClient -from msrest import Serializer, Deserializer +from msrest import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any from ._configuration import AzureQueueStorageConfiguration -from azure.core.exceptions import map_error from .operations import ServiceOperations from .operations import QueueOperations from .operations import MessagesOperations @@ -22,32 +24,33 @@ class AzureQueueStorage(object): - """AzureQueueStorage + """AzureQueueStorage. - - :ivar service: Service operations + :ivar service: ServiceOperations operations :vartype service: azure.storage.queue.operations.ServiceOperations - :ivar queue: Queue operations + :ivar queue: QueueOperations operations :vartype queue: azure.storage.queue.operations.QueueOperations - :ivar messages: Messages operations + :ivar messages: MessagesOperations operations :vartype messages: azure.storage.queue.operations.MessagesOperations - :ivar message_id: MessageId operations + :ivar message_id: MessageIdOperations operations :vartype message_id: azure.storage.queue.operations.MessageIdOperations - - :param url: The URL of the service account, queue or message that is the - targe of the desired operation. + :param url: The URL of the service account, queue or message that is the target of the desired operation. :type url: str """ - def __init__(self, url, **kwargs): - + def __init__( + self, + url, # type: str + **kwargs # type: Any + ): + # type: (...) -> None base_url = '{url}' self._config = AzureQueueStorageConfiguration(url, **kwargs) self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2018-03-28' self._serialize = Serializer(client_models) + self._serialize.client_side_validation = False self._deserialize = Deserializer(client_models) self.service = ServiceOperations( @@ -60,9 +63,14 @@ def __init__(self, url, **kwargs): self._client, self._config, self._serialize, self._deserialize) def close(self): + # type: () -> None self._client.close() + def __enter__(self): + # type: () -> AzureQueueStorage self._client.__enter__() return self + def __exit__(self, *exc_details): + # type: (Any) -> None self._client.__exit__(*exc_details) diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/_configuration.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/_configuration.py index 75443cb..530cb1c 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/_configuration.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/_configuration.py @@ -1,52 +1,58 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING + from azure.core.configuration import Configuration from azure.core.pipeline import policies -from .version import VERSION +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any +VERSION = "unknown" class AzureQueueStorageConfiguration(Configuration): - """Configuration for AzureQueueStorage + """Configuration for AzureQueueStorage. + Note that all parameters used to create this instance are saved as instance attributes. - :param url: The URL of the service account, queue or message that is the - targe of the desired operation. + :param url: The URL of the service account, queue or message that is the targe of the desired operation. :type url: str - :ivar version: Specifies the version of the operation to use for this - request. - :type version: str """ - def __init__(self, url, **kwargs): - + def __init__( + self, + url, # type: str + **kwargs # type: Any + ): + # type: (...) -> None if url is None: raise ValueError("Parameter 'url' must not be None.") - super(AzureQueueStorageConfiguration, self).__init__(**kwargs) - self._configure(**kwargs) - - self.user_agent_policy.add_user_agent('azsdk-python-azurequeuestorage/{}'.format(VERSION)) - self.generate_client_request_id = True self.url = url self.version = "2018-03-28" + kwargs.setdefault('sdk_moniker', 'azurequeuestorage/{}'.format(VERSION)) + self._configure(**kwargs) - def _configure(self, **kwargs): + def _configure( + self, + **kwargs # type: Any + ): + # type: (...) -> None self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/__init__.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/__init__.py index a6c00c2..c42c1a4 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/__init__.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/__init__.py @@ -1,13 +1,10 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
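# The _configure method above resolves every pipeline policy from **kwargs
# before falling back to an azure-core default, so callers can swap in a
# single policy without touching the rest. A sketch with illustrative retry
# settings:
from azure.core.pipeline.policies import RetryPolicy
from azure.multiapi.storagev2.queue.v2018_03_28._generated import AzureQueueStorage

client = AzureQueueStorage(
    "https://myaccount.queue.core.windows.net",
    retry_policy=RetryPolicy(retry_total=3, retry_backoff_factor=0.5),
)
# Unsupplied policies (headers, proxy, logging, redirect, ...) come from the
# policies.<Name>Policy(**kwargs) defaults; authentication_policy stays None
# unless one is passed in.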
# --------------------------------------------------------------------------
-from ._azure_queue_storage_async import AzureQueueStorage
+from ._azure_queue_storage import AzureQueueStorage

 __all__ = ['AzureQueueStorage']
diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/_azure_queue_storage.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/_azure_queue_storage.py
new file mode 100644
index 0000000..3b7bb20
--- /dev/null
+++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/_azure_queue_storage.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any
+
+from azure.core import AsyncPipelineClient
+from msrest import Deserializer, Serializer
+
+from ._configuration import AzureQueueStorageConfiguration
+from .operations import ServiceOperations
+from .operations import QueueOperations
+from .operations import MessagesOperations
+from .operations import MessageIdOperations
+from .. import models
+
+
+class AzureQueueStorage(object):
+    """AzureQueueStorage.
+
+    :ivar service: ServiceOperations operations
+    :vartype service: azure.storage.queue.aio.operations.ServiceOperations
+    :ivar queue: QueueOperations operations
+    :vartype queue: azure.storage.queue.aio.operations.QueueOperations
+    :ivar messages: MessagesOperations operations
+    :vartype messages: azure.storage.queue.aio.operations.MessagesOperations
+    :ivar message_id: MessageIdOperations operations
+    :vartype message_id: azure.storage.queue.aio.operations.MessageIdOperations
+    :param url: The URL of the service account, queue or message that is the target of the desired operation.
+    :type url: str
+    """
+
+    def __init__(
+        self,
+        url: str,
+        **kwargs: Any
+    ) -> None:
+        base_url = '{url}'
+        self._config = AzureQueueStorageConfiguration(url, **kwargs)
+        self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._serialize.client_side_validation = False
+        self._deserialize = Deserializer(client_models)
+
+        self.service = ServiceOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.queue = QueueOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.messages = MessagesOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.message_id = MessageIdOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    async def close(self) -> None:
+        await self._client.close()
+
+    async def __aenter__(self) -> "AzureQueueStorage":
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details) -> None:
+        await self._client.__aexit__(*exc_details)
diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/_configuration.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/_configuration.py
new file mode 100644
index 0000000..ecd33f8
--- /dev/null
+++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/_configuration.py
@@ -0,0 +1,52 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+
+VERSION = "unknown"
+
+class AzureQueueStorageConfiguration(Configuration):
+    """Configuration for AzureQueueStorage.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param url: The URL of the service account, queue or message that is the target of the desired operation.
+ :type url: str + """ + + def __init__( + self, + url: str, + **kwargs: Any + ) -> None: + if url is None: + raise ValueError("Parameter 'url' must not be None.") + super(AzureQueueStorageConfiguration, self).__init__(**kwargs) + + self.url = url + self.version = "2018-03-28" + kwargs.setdefault('sdk_moniker', 'azurequeuestorage/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ) -> None: + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/operations/__init__.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/operations/__init__.py new file mode 100644 index 0000000..c0abe55 --- /dev/null +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/operations/__init__.py @@ -0,0 +1,19 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._queue_operations import QueueOperations +from ._messages_operations import MessagesOperations +from ._message_id_operations import MessageIdOperations + +__all__ = [ + 'ServiceOperations', + 'QueueOperations', + 'MessagesOperations', + 'MessageIdOperations', +] diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/operations/_message_id_operations.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/operations/_message_id_operations.py new file mode 100644 index 0000000..c84b04e --- /dev/null +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/aio/operations/_message_id_operations.py @@ -0,0 +1,205 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
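# Async counterpart sketch: the aio client is constructed the same way but on
# AsyncPipelineClient, and its configuration substitutes AsyncRetryPolicy and
# AsyncRedirectPolicy. The URL is an illustrative placeholder.
import asyncio
from azure.multiapi.storagev2.queue.v2018_03_28._generated.aio import AzureQueueStorage

async def main():
    async with AzureQueueStorage("https://myaccount.queue.core.windows.net") as client:
        stats = await client.service.get_statistics()

asyncio.run(main())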
+# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar +import warnings + +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest + +from ... import models as _models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class MessageIdOperations: + """MessageIdOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.queue.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = _models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def update( + self, + pop_receipt: str, + visibilitytimeout: int, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + queue_message: Optional["_models.QueueMessage"] = None, + **kwargs + ) -> None: + """The Update operation was introduced with version 2011-08-18 of the Queue service API. The + Update Message operation updates the visibility timeout of a message. You can also use this + operation to update the contents of a message. A message must be in a format that can be + included in an XML request with UTF-8 encoding, and the encoded message can be up to 64KB in + size. + + :param pop_receipt: Required. Specifies the valid pop receipt value returned from an earlier + call to the Get Messages or Update Message operation. + :type pop_receipt: str + :param visibilitytimeout: Optional. Specifies the new visibility timeout value, in seconds, + relative to server time. The default value is 30 seconds. A specified value must be larger than + or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol + versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value + later than the expiry time. + :type visibilitytimeout: int + :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: + """The Delete operation deletes the specified message. + + :param pop_receipt: Required. Specifies the valid pop receipt value returned from an earlier + call to the Get Messages or Update Message operation. + :type pop_receipt: str + :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def dequeue( + self, + number_of_messages: Optional[int] = None, + visibilitytimeout: Optional[int] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> List["_models.DequeuedMessageItem"]: + """The Dequeue operation retrieves one or more messages from the front of the queue. + + :param number_of_messages: Optional. 
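# Hedged sketch of the async update above: renewing a message's visibility
# with the pop receipt returned by an earlier dequeue. This generation builds
# the request from the client's own URL, so `client` is assumed to already
# target the specific message; passing queue_message as well would replace
# the message content.
async def renew_visibility(client, pop_receipt: str) -> None:
    await client.message_id.update(
        pop_receipt=pop_receipt,
        visibilitytimeout=30,  # hide the message for another 30 seconds
    )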
A nonzero integer value that specifies the number of + messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible + messages are returned. By default, a single message is retrieved from the queue with this + operation. + :type number_of_messages: int + :param visibilitytimeout: Optional. Specifies the new visibility timeout value, in seconds, + relative to server time. The default value is 30 seconds. A specified value must be larger than + or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol + versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value + later than the expiry time. + :type visibilitytimeout: int + :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: + """The Clear operation deletes all messages from the specified queue. + + :param timeout: The The timeout parameter is expressed in seconds. For more information, see List["_models.EnqueuedMessage"]: + """The Enqueue operation adds a new message to the back of the message queue. A visibility timeout + can also be specified to make the message invisible until the visibility timeout expires. A + message must be in a format that can be included in an XML request with UTF-8 encoding. The + encoded message can be up to 64 KB in size for versions 2011-08-18 and newer, or 8 KB in size + for previous versions. + + :param queue_message: A Message object which can be stored in a Queue. + :type queue_message: ~azure.storage.queue.models.QueueMessage + :param visibilitytimeout: Optional. If specified, the request must be made using an x-ms- + version of 2011-08-18 or later. If not specified, the default value is 0. Specifies the new + visibility timeout value, in seconds, relative to server time. The new value must be larger + than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message + cannot be set to a value later than the expiry time. visibilitytimeout should be set to a value + smaller than the time-to-live value. + :type visibilitytimeout: int + :param message_time_to_live: Optional. Specifies the time-to-live interval for the message, in + seconds. Prior to version 2017-07-29, the maximum time-to-live allowed is 7 days. For version + 2017-07-29 or later, the maximum time-to-live can be any positive number, as well as -1 + indicating that the message does not expire. If this parameter is omitted, the default time-to- + live is 7 days. + :type message_time_to_live: int + :param timeout: The The timeout parameter is expressed in seconds. For more information, see List["_models.PeekedMessageItem"]: + """The Peek operation retrieves one or more messages from the front of the queue, but does not + alter the visibility of the message. + + :param number_of_messages: Optional. A nonzero integer value that specifies the number of + messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible + messages are returned. By default, a single message is retrieved from the queue with this + operation. + :type number_of_messages: int + :param timeout: The The timeout parameter is expressed in seconds. 
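# Hedged round-trip sketch for the messages operations above: enqueue adds a
# QueueMessage to the back of the queue, dequeue takes up to
# number_of_messages from the front. The models import path follows this
# package's layout; the client is assumed to target a queue.
from azure.multiapi.storagev2.queue.v2018_03_28._generated.models import QueueMessage

async def roundtrip(client) -> None:
    await client.messages.enqueue(queue_message=QueueMessage(message_text="hello"))
    items = await client.messages.dequeue(number_of_messages=1, visibilitytimeout=30)
    for item in items:
        # item.pop_receipt is what the message_id update/delete operations need.
        print(item.message_id, item.message_text)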
For more information, see None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def create( + self, + timeout: Optional[int] = None, + metadata: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> None: + """creates a new queue under the given account. + + :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: + """operation permanently deletes the specified queue. + + :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: + """Retrieves user-defined metadata and queue properties on the specified queue. Metadata is + associated with the queue as name-values pairs. + + :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: + """sets user-defined metadata on the specified queue. Metadata is associated with the queue as + name-value pairs. + + :param timeout: The The timeout parameter is expressed in seconds. For more information, see List["_models.SignedIdentifier"]: + """returns details about any stored access policies specified on the queue that may be used with + Shared Access Signatures. + + :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: + """sets stored access policies for the queue that may be used with Shared Access Signatures. + + :param timeout: The The timeout parameter is expressed in seconds. For more information, see None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def set_properties( + self, + storage_service_properties: "_models.StorageServiceProperties", + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs + ) -> None: + """Sets properties for a storage account's Queue service endpoint, including properties for + Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. + :type storage_service_properties: ~azure.storage.queue.models.StorageServiceProperties + :param timeout: The The timeout parameter is expressed in seconds. For more information, see "_models.StorageServiceProperties": + """gets the properties of a storage account's Queue service, including properties for Storage + Analytics and CORS (Cross-Origin Resource Sharing) rules. + + :param timeout: The The timeout parameter is expressed in seconds. For more information, see "_models.StorageServiceStats": + """Retrieves statistics related to replication for the Queue service. It is only available on the + secondary location endpoint when read-access geo-redundant replication is enabled for the + storage account. + + :param timeout: The The timeout parameter is expressed in seconds. For more information, see "_models.ListQueuesSegmentResponse": + """The List Queues Segment operation returns a list of the queues under the specified account. + + :param prefix: Filters the results to return only queues whose name begins with the specified + prefix. + :type prefix: str + :param marker: A string value that identifies the portion of the list of queues to be returned + with the next listing operation. The operation returns the NextMarker value within the response + body if the listing operation did not return all queues remaining to be listed with the current + page. 
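# Pagination sketch for list_queues_segment as described above: feed each
# response's next_marker back in as marker until the service returns an empty
# one. Synchronous flavor; maxresults is illustrative.
def list_all_queue_names(client):
    marker = None
    while True:
        segment = client.service.list_queues_segment(maxresults=100, marker=marker)
        for queue in segment.queue_items or []:
            yield queue.name
        marker = segment.next_marker
        if not marker:
            break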
The NextMarker value can be used as the value for the marker parameter in a subsequent + call to request the next page of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of queues to return. If the request does not + specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 + items. Note that if the listing operation crosses a partition boundary, then the service will + return a continuation token for retrieving the remainder of the results. For this reason, it is + possible that the service will return fewer results than specified by maxresults, or than the + default of 5000. + :type maxresults: int + :param include: Include this parameter to specify that the queues's metadata be returned as + part of the response body. + :type include: list[str] + :param timeout: The The timeout parameter is expressed in seconds. For more information, see `. :type metadata: dict[str, str] """ @@ -449,25 +434,28 @@ class QueueItem(Model): } _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, + 'name': {'key': 'Name', 'type': 'str'}, + 'metadata': {'key': 'Metadata', 'type': '{str}'}, } _xml_map = { 'name': 'Queue' } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(QueueItem, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.metadata = kwargs.get('metadata', None) -class QueueMessage(Model): +class QueueMessage(msrest.serialization.Model): """A Message object which can be stored in a Queue. All required parameters must be populated in order to send to Azure. - :param message_text: Required. The content of the message + :param message_text: Required. The content of the message. :type message_text: str """ @@ -476,27 +464,27 @@ class QueueMessage(Model): } _attribute_map = { - 'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}}, - } - _xml_map = { + 'message_text': {'key': 'MessageText', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(QueueMessage, self).__init__(**kwargs) - self.message_text = kwargs.get('message_text', None) + self.message_text = kwargs['message_text'] -class RetentionPolicy(Model): +class RetentionPolicy(msrest.serialization.Model): """the retention policy. All required parameters must be populated in order to send to Azure. - :param enabled: Required. Indicates whether a retention policy is enabled - for the storage service + :param enabled: Required. Indicates whether a retention policy is enabled for the storage + service. :type enabled: bool - :param days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted + :param days: Indicates the number of days that metrics or logging or soft-deleted data should + be retained. All data older than this value will be deleted. 
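# The kwargs-based models above were tightened: required fields are now read
# with kwargs['name'] / kwargs['message_text'] / kwargs['enabled'] rather than
# kwargs.get(...), so omitting one fails fast with KeyError (the _models_py3
# variants make the same fields required keyword arguments instead). A
# self-contained illustration of the pattern:
class _Sketch:
    def __init__(self, **kwargs):
        self.enabled = kwargs['enabled']      # required -> KeyError if absent
        self.days = kwargs.get('days', None)  # optional -> defaults to None

_ok = _Sketch(enabled=True, days=7)
try:
    _Sketch(days=7)
except KeyError:
    pass  # required field was not supplied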
:type days: int """ @@ -506,26 +494,27 @@ class RetentionPolicy(Model): } _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'days': {'key': 'Days', 'type': 'int'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(RetentionPolicy, self).__init__(**kwargs) - self.enabled = kwargs.get('enabled', None) + self.enabled = kwargs['enabled'] self.days = kwargs.get('days', None) -class SignedIdentifier(Model): +class SignedIdentifier(msrest.serialization.Model): """signed identifier. All required parameters must be populated in order to send to Azure. - :param id: Required. a unique id + :param id: Required. a unique id. :type id: str - :param access_policy: The access policy + :param access_policy: The access policy. :type access_policy: ~azure.storage.queue.models.AccessPolicy """ @@ -534,19 +523,20 @@ class SignedIdentifier(Model): } _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(SignedIdentifier, self).__init__(**kwargs) - self.id = kwargs.get('id', None) + self.id = kwargs['id'] self.access_policy = kwargs.get('access_policy', None) -class StorageError(Model): +class StorageError(msrest.serialization.Model): """StorageError. :param message: @@ -554,57 +544,43 @@ class StorageError(Model): """ _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { + 'message': {'key': 'Message', 'type': 'str'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StorageError, self).__init__(**kwargs) self.message = kwargs.get('message', None) -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): +class StorageServiceProperties(msrest.serialization.Model): """Storage Service Properties. - :param logging: Azure Analytics Logging settings + :param logging: Azure Analytics Logging settings. :type logging: ~azure.storage.queue.models.Logging - :param hour_metrics: A summary of request statistics grouped by API in - hourly aggregates for queues + :param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for + queues. :type hour_metrics: ~azure.storage.queue.models.Metrics - :param minute_metrics: a summary of request statistics grouped by API in - minute aggregates for queues + :param minute_metrics: a summary of request statistics grouped by API in minute aggregates for + queues. :type minute_metrics: ~azure.storage.queue.models.Metrics :param cors: The set of CORS rules. 
:type cors: list[~azure.storage.queue.models.CorsRule] """ _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - } - _xml_map = { + 'logging': {'key': 'Logging', 'type': 'Logging'}, + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StorageServiceProperties, self).__init__(**kwargs) self.logging = kwargs.get('logging', None) self.hour_metrics = kwargs.get('hour_metrics', None) @@ -612,20 +588,20 @@ def __init__(self, **kwargs): self.cors = kwargs.get('cors', None) -class StorageServiceStats(Model): +class StorageServiceStats(msrest.serialization.Model): """Stats for the storage service. - :param geo_replication: Geo-Replication information for the Secondary - Storage Service + :param geo_replication: Geo-Replication information for the Secondary Storage Service. :type geo_replication: ~azure.storage.queue.models.GeoReplication """ _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, - } - _xml_map = { + 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, } - def __init__(self, **kwargs): + def __init__( + self, + **kwargs + ): super(StorageServiceStats, self).__init__(**kwargs) self.geo_replication = kwargs.get('geo_replication', None) diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/models/_models_py3.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/models/_models_py3.py index e74ab62..b4000bf 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/models/_models_py3.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/models/_models_py3.py @@ -1,80 +1,73 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from msrest.serialization import Model +import datetime +from typing import Dict, List, Optional, Union + from azure.core.exceptions import HttpResponseError +import msrest.serialization +from ._azure_queue_storage_enums import * -class AccessPolicy(Model): - """An Access policy. - All required parameters must be populated in order to send to Azure. +class AccessPolicy(msrest.serialization.Model): + """An Access policy. - :param start: Required. the date-time the policy is active + :param start: the date-time the policy is active. :type start: str - :param expiry: Required. the date-time the policy expires + :param expiry: the date-time the policy expires. 
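# Composition sketch for the service-properties models above: Logging requires
# a RetentionPolicy, and Metrics accepts one optionally. Field values are
# illustrative; on Python 3 these resolve to the keyword-only _models_py3
# constructors.
from azure.multiapi.storagev2.queue.v2018_03_28._generated.models import (
    Logging, Metrics, RetentionPolicy, StorageServiceProperties)

retention = RetentionPolicy(enabled=True, days=5)
props = StorageServiceProperties(
    logging=Logging(version="1.0", delete=True, read=True, write=True,
                    retention_policy=retention),
    hour_metrics=Metrics(enabled=True, version="1.0", include_apis=True,
                         retention_policy=retention),
)
# client.service.set_properties(props)  # sync flavor; awaited in aio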
:type expiry: str - :param permission: Required. the permissions for the acl policy + :param permission: the permissions for the acl policy. :type permission: str """ - _validation = { - 'start': {'required': True}, - 'expiry': {'required': True}, - 'permission': {'required': True}, - } - _attribute_map = { - 'start': {'key': 'Start', 'type': 'str', 'xml': {'name': 'Start'}}, - 'expiry': {'key': 'Expiry', 'type': 'str', 'xml': {'name': 'Expiry'}}, - 'permission': {'key': 'Permission', 'type': 'str', 'xml': {'name': 'Permission'}}, - } - _xml_map = { - } - - def __init__(self, *, start: str, expiry: str, permission: str, **kwargs) -> None: + 'start': {'key': 'Start', 'type': 'str'}, + 'expiry': {'key': 'Expiry', 'type': 'str'}, + 'permission': {'key': 'Permission', 'type': 'str'}, + } + + def __init__( + self, + *, + start: Optional[str] = None, + expiry: Optional[str] = None, + permission: Optional[str] = None, + **kwargs + ): super(AccessPolicy, self).__init__(**kwargs) self.start = start self.expiry = expiry self.permission = permission -class CorsRule(Model): - """CORS is an HTTP feature that enables a web application running under one - domain to access resources in another domain. Web browsers implement a - security restriction known as same-origin policy that prevents a web page - from calling APIs in a different domain; CORS provides a secure way to - allow one domain (the origin domain) to call APIs in another domain. +class CorsRule(msrest.serialization.Model): + """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain. All required parameters must be populated in order to send to Azure. - :param allowed_origins: Required. The origin domains that are permitted to - make a request against the storage service via CORS. The origin domain is - the domain from which the request originates. Note that the origin must be - an exact case-sensitive match with the origin that the user age sends to - the service. You can also use the wildcard character '*' to allow all - origin domains to make requests via CORS. + :param allowed_origins: Required. The origin domains that are permitted to make a request + against the storage service via CORS. The origin domain is the domain from which the request + originates. Note that the origin must be an exact case-sensitive match with the origin that the + user age sends to the service. You can also use the wildcard character '*' to allow all origin + domains to make requests via CORS. :type allowed_origins: str - :param allowed_methods: Required. The methods (HTTP request verbs) that - the origin domain may use for a CORS request. (comma separated) + :param allowed_methods: Required. The methods (HTTP request verbs) that the origin domain may + use for a CORS request. (comma separated). :type allowed_methods: str - :param allowed_headers: Required. the request headers that the origin - domain may specify on the CORS request. + :param allowed_headers: Required. the request headers that the origin domain may specify on the + CORS request. :type allowed_headers: str - :param exposed_headers: Required. 
The response headers that may be sent in - the response to the CORS request and exposed by the browser to the request - issuer + :param exposed_headers: Required. The response headers that may be sent in the response to the + CORS request and exposed by the browser to the request issuer. :type exposed_headers: str - :param max_age_in_seconds: Required. The maximum amount time that a - browser should cache the preflight OPTIONS request. + :param max_age_in_seconds: Required. The maximum amount time that a browser should cache the + preflight OPTIONS request. :type max_age_in_seconds: int """ @@ -87,16 +80,23 @@ class CorsRule(Model): } _attribute_map = { - 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str', 'xml': {'name': 'AllowedOrigins'}}, - 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str', 'xml': {'name': 'AllowedMethods'}}, - 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str', 'xml': {'name': 'AllowedHeaders'}}, - 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str', 'xml': {'name': 'ExposedHeaders'}}, - 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int', 'xml': {'name': 'MaxAgeInSeconds'}}, - } - _xml_map = { - } - - def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_headers: str, exposed_headers: str, max_age_in_seconds: int, **kwargs) -> None: + 'allowed_origins': {'key': 'AllowedOrigins', 'type': 'str'}, + 'allowed_methods': {'key': 'AllowedMethods', 'type': 'str'}, + 'allowed_headers': {'key': 'AllowedHeaders', 'type': 'str'}, + 'exposed_headers': {'key': 'ExposedHeaders', 'type': 'str'}, + 'max_age_in_seconds': {'key': 'MaxAgeInSeconds', 'type': 'int'}, + } + + def __init__( + self, + *, + allowed_origins: str, + allowed_methods: str, + allowed_headers: str, + exposed_headers: str, + max_age_in_seconds: int, + **kwargs + ): super(CorsRule, self).__init__(**kwargs) self.allowed_origins = allowed_origins self.allowed_methods = allowed_methods @@ -105,29 +105,25 @@ def __init__(self, *, allowed_origins: str, allowed_methods: str, allowed_header self.max_age_in_seconds = max_age_in_seconds -class DequeuedMessageItem(Model): - """The object returned in the QueueMessageList array when calling Get Messages - on a Queue. +class DequeuedMessageItem(msrest.serialization.Model): + """The object returned in the QueueMessageList array when calling Get Messages on a Queue. All required parameters must be populated in order to send to Azure. :param message_id: Required. The Id of the Message. :type message_id: str - :param insertion_time: Required. The time the Message was inserted into - the Queue. - :type insertion_time: datetime - :param expiration_time: Required. The time that the Message will expire - and be automatically deleted. - :type expiration_time: datetime - :param pop_receipt: Required. This value is required to delete the - Message. If deletion fails using this popreceipt then the message has been - dequeued by another client. + :param insertion_time: Required. The time the Message was inserted into the Queue. + :type insertion_time: ~datetime.datetime + :param expiration_time: Required. The time that the Message will expire and be automatically + deleted. + :type expiration_time: ~datetime.datetime + :param pop_receipt: Required. This value is required to delete the Message. If deletion fails + using this popreceipt then the message has been dequeued by another client. :type pop_receipt: str - :param time_next_visible: Required. The time that the message will again - become visible in the Queue. 
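# The py3 CorsRule above is now keyword-only, with all five parameters
# required. Values are illustrative.
from azure.multiapi.storagev2.queue.v2018_03_28._generated.models import CorsRule

rule = CorsRule(
    allowed_origins="https://contoso.example",
    allowed_methods="GET,PUT",
    allowed_headers="x-ms-meta-*",
    exposed_headers="x-ms-meta-*",
    max_age_in_seconds=200,
)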
- :type time_next_visible: datetime - :param dequeue_count: Required. The number of times the message has been - dequeued. + :param time_next_visible: Required. The time that the message will again become visible in the + Queue. + :type time_next_visible: ~datetime.datetime + :param dequeue_count: Required. The number of times the message has been dequeued. :type dequeue_count: long :param message_text: Required. The content of the Message. :type message_text: str @@ -144,19 +140,30 @@ class DequeuedMessageItem(Model): } _attribute_map = { - 'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}}, - 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}}, - 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}}, - 'pop_receipt': {'key': 'PopReceipt', 'type': 'str', 'xml': {'name': 'PopReceipt'}}, - 'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123', 'xml': {'name': 'TimeNextVisible'}}, - 'dequeue_count': {'key': 'DequeueCount', 'type': 'long', 'xml': {'name': 'DequeueCount'}}, - 'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}}, + 'message_id': {'key': 'MessageId', 'type': 'str'}, + 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123'}, + 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123'}, + 'pop_receipt': {'key': 'PopReceipt', 'type': 'str'}, + 'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123'}, + 'dequeue_count': {'key': 'DequeueCount', 'type': 'long'}, + 'message_text': {'key': 'MessageText', 'type': 'str'}, } _xml_map = { 'name': 'QueueMessage' } - def __init__(self, *, message_id: str, insertion_time, expiration_time, pop_receipt: str, time_next_visible, dequeue_count: int, message_text: str, **kwargs) -> None: + def __init__( + self, + *, + message_id: str, + insertion_time: datetime.datetime, + expiration_time: datetime.datetime, + pop_receipt: str, + time_next_visible: datetime.datetime, + dequeue_count: int, + message_text: str, + **kwargs + ): super(DequeuedMessageItem, self).__init__(**kwargs) self.message_id = message_id self.insertion_time = insertion_time @@ -167,27 +174,24 @@ def __init__(self, *, message_id: str, insertion_time, expiration_time, pop_rece self.message_text = message_text -class EnqueuedMessage(Model): - """The object returned in the QueueMessageList array when calling Put Message - on a Queue. +class EnqueuedMessage(msrest.serialization.Model): + """The object returned in the QueueMessageList array when calling Put Message on a Queue. All required parameters must be populated in order to send to Azure. :param message_id: Required. The Id of the Message. :type message_id: str - :param insertion_time: Required. The time the Message was inserted into - the Queue. - :type insertion_time: datetime - :param expiration_time: Required. The time that the Message will expire - and be automatically deleted. - :type expiration_time: datetime - :param pop_receipt: Required. This value is required to delete the - Message. If deletion fails using this popreceipt then the message has been - dequeued by another client. + :param insertion_time: Required. The time the Message was inserted into the Queue. + :type insertion_time: ~datetime.datetime + :param expiration_time: Required. The time that the Message will expire and be automatically + deleted. + :type expiration_time: ~datetime.datetime + :param pop_receipt: Required. This value is required to delete the Message. 
If deletion fails + using this popreceipt then the message has been dequeued by another client. :type pop_receipt: str - :param time_next_visible: Required. The time that the message will again - become visible in the Queue. - :type time_next_visible: datetime + :param time_next_visible: Required. The time that the message will again become visible in the + Queue. + :type time_next_visible: ~datetime.datetime """ _validation = { @@ -199,17 +203,26 @@ class EnqueuedMessage(Model): } _attribute_map = { - 'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}}, - 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}}, - 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}}, - 'pop_receipt': {'key': 'PopReceipt', 'type': 'str', 'xml': {'name': 'PopReceipt'}}, - 'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123', 'xml': {'name': 'TimeNextVisible'}}, + 'message_id': {'key': 'MessageId', 'type': 'str'}, + 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123'}, + 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123'}, + 'pop_receipt': {'key': 'PopReceipt', 'type': 'str'}, + 'time_next_visible': {'key': 'TimeNextVisible', 'type': 'rfc-1123'}, } _xml_map = { 'name': 'QueueMessage' } - def __init__(self, *, message_id: str, insertion_time, expiration_time, pop_receipt: str, time_next_visible, **kwargs) -> None: + def __init__( + self, + *, + message_id: str, + insertion_time: datetime.datetime, + expiration_time: datetime.datetime, + pop_receipt: str, + time_next_visible: datetime.datetime, + **kwargs + ): super(EnqueuedMessage, self).__init__(**kwargs) self.message_id = message_id self.insertion_time = insertion_time @@ -218,19 +231,18 @@ def __init__(self, *, message_id: str, insertion_time, expiration_time, pop_rece self.time_next_visible = time_next_visible -class GeoReplication(Model): +class GeoReplication(msrest.serialization.Model): """GeoReplication. All required parameters must be populated in order to send to Azure. - :param status: Required. The status of the secondary location. Possible - values include: 'live', 'bootstrap', 'unavailable' + :param status: Required. The status of the secondary location. Possible values include: "live", + "bootstrap", "unavailable". :type status: str or ~azure.storage.queue.models.GeoReplicationStatusType - :param last_sync_time: Required. A GMT date/time value, to the second. All - primary writes preceding this value are guaranteed to be available for - read operations at the secondary. Primary writes after this point in time - may or may not be available for reads. - :type last_sync_time: datetime + :param last_sync_time: Required. A GMT date/time value, to the second. All primary writes + preceding this value are guaranteed to be available for read operations at the secondary. + Primary writes after this point in time may or may not be available for reads. 
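# Reading replication state with the GeoReplication model above: status
# deserializes as a plain string ("live", "bootstrap", "unavailable") and
# last_sync_time as an RFC-1123 datetime. Hedged synchronous sketch:
def secondary_is_live(client) -> bool:
    stats = client.service.get_statistics()
    geo = stats.geo_replication
    return geo is not None and geo.status == "live"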
+ :type last_sync_time: ~datetime.datetime """ _validation = { @@ -239,19 +251,23 @@ class GeoReplication(Model): } _attribute_map = { - 'status': {'key': 'Status', 'type': 'str', 'xml': {'name': 'Status'}}, - 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123', 'xml': {'name': 'LastSyncTime'}}, - } - _xml_map = { + 'status': {'key': 'Status', 'type': 'str'}, + 'last_sync_time': {'key': 'LastSyncTime', 'type': 'rfc-1123'}, } - def __init__(self, *, status, last_sync_time, **kwargs) -> None: + def __init__( + self, + *, + status: Union[str, "GeoReplicationStatusType"], + last_sync_time: datetime.datetime, + **kwargs + ): super(GeoReplication, self).__init__(**kwargs) self.status = status self.last_sync_time = last_sync_time -class ListQueuesSegmentResponse(Model): +class ListQueuesSegmentResponse(msrest.serialization.Model): """The object returned when calling List Queues on a Queue Service. All required parameters must be populated in order to send to Azure. @@ -278,18 +294,28 @@ class ListQueuesSegmentResponse(Model): } _attribute_map = { - 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, - 'prefix': {'key': 'Prefix', 'type': 'str', 'xml': {'name': 'Prefix'}}, - 'marker': {'key': 'Marker', 'type': 'str', 'xml': {'name': 'Marker'}}, - 'max_results': {'key': 'MaxResults', 'type': 'int', 'xml': {'name': 'MaxResults'}}, - 'queue_items': {'key': 'QueueItems', 'type': '[QueueItem]', 'xml': {'name': 'Queues', 'itemsName': 'Queues', 'wrapped': True}}, - 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'attr': True}}, + 'prefix': {'key': 'Prefix', 'type': 'str'}, + 'marker': {'key': 'Marker', 'type': 'str'}, + 'max_results': {'key': 'MaxResults', 'type': 'int'}, + 'queue_items': {'key': 'QueueItems', 'type': '[QueueItem]', 'xml': {'name': 'Queues', 'wrapped': True, 'itemsName': 'Queue'}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str'}, } _xml_map = { 'name': 'EnumerationResults' } - def __init__(self, *, service_endpoint: str, prefix: str, max_results: int, next_marker: str, marker: str=None, queue_items=None, **kwargs) -> None: + def __init__( + self, + *, + service_endpoint: str, + prefix: str, + max_results: int, + next_marker: str, + marker: Optional[str] = None, + queue_items: Optional[List["QueueItem"]] = None, + **kwargs + ): super(ListQueuesSegmentResponse, self).__init__(**kwargs) self.service_endpoint = service_endpoint self.prefix = prefix @@ -299,23 +325,20 @@ def __init__(self, *, service_endpoint: str, prefix: str, max_results: int, next self.next_marker = next_marker -class Logging(Model): +class Logging(msrest.serialization.Model): """Azure Analytics Logging settings. All required parameters must be populated in order to send to Azure. :param version: Required. The version of Storage Analytics to configure. :type version: str - :param delete: Required. Indicates whether all delete requests should be - logged. + :param delete: Required. Indicates whether all delete requests should be logged. :type delete: bool - :param read: Required. Indicates whether all read requests should be - logged. + :param read: Required. Indicates whether all read requests should be logged. :type read: bool - :param write: Required. Indicates whether all write requests should be - logged. + :param write: Required. Indicates whether all write requests should be logged. :type write: bool - :param retention_policy: Required. 
+ :param retention_policy: Required. the retention policy. :type retention_policy: ~azure.storage.queue.models.RetentionPolicy """ @@ -328,16 +351,23 @@ class Logging(Model): } _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'delete': {'key': 'Delete', 'type': 'bool', 'xml': {'name': 'Delete'}}, - 'read': {'key': 'Read', 'type': 'bool', 'xml': {'name': 'Read'}}, - 'write': {'key': 'Write', 'type': 'bool', 'xml': {'name': 'Write'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, version: str, delete: bool, read: bool, write: bool, retention_policy, **kwargs) -> None: + 'version': {'key': 'Version', 'type': 'str'}, + 'delete': {'key': 'Delete', 'type': 'bool'}, + 'read': {'key': 'Read', 'type': 'bool'}, + 'write': {'key': 'Write', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + *, + version: str, + delete: bool, + read: bool, + write: bool, + retention_policy: "RetentionPolicy", + **kwargs + ): super(Logging, self).__init__(**kwargs) self.version = version self.delete = delete @@ -346,20 +376,19 @@ def __init__(self, *, version: str, delete: bool, read: bool, write: bool, reten self.retention_policy = retention_policy -class Metrics(Model): +class Metrics(msrest.serialization.Model): """Metrics. All required parameters must be populated in order to send to Azure. :param version: The version of Storage Analytics to configure. :type version: str - :param enabled: Required. Indicates whether metrics are enabled for the - Queue service. + :param enabled: Required. Indicates whether metrics are enabled for the Queue service. :type enabled: bool - :param include_apis: Indicates whether metrics should generate summary - statistics for called API operations. + :param include_apis: Indicates whether metrics should generate summary statistics for called + API operations. :type include_apis: bool - :param retention_policy: + :param retention_policy: the retention policy. 
:type retention_policy: ~azure.storage.queue.models.RetentionPolicy """ @@ -368,15 +397,21 @@ class Metrics(Model): } _attribute_map = { - 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool', 'xml': {'name': 'IncludeAPIs'}}, - 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy', 'xml': {'name': 'RetentionPolicy'}}, - } - _xml_map = { - } - - def __init__(self, *, enabled: bool, version: str=None, include_apis: bool=None, retention_policy=None, **kwargs) -> None: + 'version': {'key': 'Version', 'type': 'str'}, + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'include_apis': {'key': 'IncludeAPIs', 'type': 'bool'}, + 'retention_policy': {'key': 'RetentionPolicy', 'type': 'RetentionPolicy'}, + } + + def __init__( + self, + *, + enabled: bool, + version: Optional[str] = None, + include_apis: Optional[bool] = None, + retention_policy: Optional["RetentionPolicy"] = None, + **kwargs + ): super(Metrics, self).__init__(**kwargs) self.version = version self.enabled = enabled @@ -384,22 +419,19 @@ def __init__(self, *, enabled: bool, version: str=None, include_apis: bool=None, self.retention_policy = retention_policy -class PeekedMessageItem(Model): - """The object returned in the QueueMessageList array when calling Peek - Messages on a Queue. +class PeekedMessageItem(msrest.serialization.Model): + """The object returned in the QueueMessageList array when calling Peek Messages on a Queue. All required parameters must be populated in order to send to Azure. :param message_id: Required. The Id of the Message. :type message_id: str - :param insertion_time: Required. The time the Message was inserted into - the Queue. - :type insertion_time: datetime - :param expiration_time: Required. The time that the Message will expire - and be automatically deleted. - :type expiration_time: datetime - :param dequeue_count: Required. The number of times the message has been - dequeued. + :param insertion_time: Required. The time the Message was inserted into the Queue. + :type insertion_time: ~datetime.datetime + :param expiration_time: Required. The time that the Message will expire and be automatically + deleted. + :type expiration_time: ~datetime.datetime + :param dequeue_count: Required. The number of times the message has been dequeued. :type dequeue_count: long :param message_text: Required. The content of the Message. 
:type message_text: str @@ -414,17 +446,26 @@ class PeekedMessageItem(Model): } _attribute_map = { - 'message_id': {'key': 'MessageId', 'type': 'str', 'xml': {'name': 'MessageId'}}, - 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123', 'xml': {'name': 'InsertionTime'}}, - 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123', 'xml': {'name': 'ExpirationTime'}}, - 'dequeue_count': {'key': 'DequeueCount', 'type': 'long', 'xml': {'name': 'DequeueCount'}}, - 'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}}, + 'message_id': {'key': 'MessageId', 'type': 'str'}, + 'insertion_time': {'key': 'InsertionTime', 'type': 'rfc-1123'}, + 'expiration_time': {'key': 'ExpirationTime', 'type': 'rfc-1123'}, + 'dequeue_count': {'key': 'DequeueCount', 'type': 'long'}, + 'message_text': {'key': 'MessageText', 'type': 'str'}, } _xml_map = { 'name': 'QueueMessage' } - def __init__(self, *, message_id: str, insertion_time, expiration_time, dequeue_count: int, message_text: str, **kwargs) -> None: + def __init__( + self, + *, + message_id: str, + insertion_time: datetime.datetime, + expiration_time: datetime.datetime, + dequeue_count: int, + message_text: str, + **kwargs + ): super(PeekedMessageItem, self).__init__(**kwargs) self.message_id = message_id self.insertion_time = insertion_time @@ -433,14 +474,14 @@ def __init__(self, *, message_id: str, insertion_time, expiration_time, dequeue_ self.message_text = message_text -class QueueItem(Model): +class QueueItem(msrest.serialization.Model): """An Azure Storage Queue. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the Queue. :type name: str - :param metadata: + :param metadata: Dictionary of :code:``. :type metadata: dict[str, str] """ @@ -449,25 +490,31 @@ class QueueItem(Model): } _attribute_map = { - 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, - 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, + 'name': {'key': 'Name', 'type': 'str'}, + 'metadata': {'key': 'Metadata', 'type': '{str}'}, } _xml_map = { 'name': 'Queue' } - def __init__(self, *, name: str, metadata=None, **kwargs) -> None: + def __init__( + self, + *, + name: str, + metadata: Optional[Dict[str, str]] = None, + **kwargs + ): super(QueueItem, self).__init__(**kwargs) self.name = name self.metadata = metadata -class QueueMessage(Model): +class QueueMessage(msrest.serialization.Model): """A Message object which can be stored in a Queue. All required parameters must be populated in order to send to Azure. - :param message_text: Required. The content of the message + :param message_text: Required. The content of the message. :type message_text: str """ @@ -476,27 +523,29 @@ class QueueMessage(Model): } _attribute_map = { - 'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}}, - } - _xml_map = { + 'message_text': {'key': 'MessageText', 'type': 'str'}, } - def __init__(self, *, message_text: str, **kwargs) -> None: + def __init__( + self, + *, + message_text: str, + **kwargs + ): super(QueueMessage, self).__init__(**kwargs) self.message_text = message_text -class RetentionPolicy(Model): +class RetentionPolicy(msrest.serialization.Model): """the retention policy. All required parameters must be populated in order to send to Azure. - :param enabled: Required. Indicates whether a retention policy is enabled - for the storage service + :param enabled: Required. 
Indicates whether a retention policy is enabled for the storage + service. :type enabled: bool - :param days: Indicates the number of days that metrics or logging or - soft-deleted data should be retained. All data older than this value will - be deleted + :param days: Indicates the number of days that metrics or logging or soft-deleted data should + be retained. All data older than this value will be deleted. :type days: int """ @@ -506,26 +555,30 @@ class RetentionPolicy(Model): } _attribute_map = { - 'enabled': {'key': 'Enabled', 'type': 'bool', 'xml': {'name': 'Enabled'}}, - 'days': {'key': 'Days', 'type': 'int', 'xml': {'name': 'Days'}}, - } - _xml_map = { + 'enabled': {'key': 'Enabled', 'type': 'bool'}, + 'days': {'key': 'Days', 'type': 'int'}, } - def __init__(self, *, enabled: bool, days: int=None, **kwargs) -> None: + def __init__( + self, + *, + enabled: bool, + days: Optional[int] = None, + **kwargs + ): super(RetentionPolicy, self).__init__(**kwargs) self.enabled = enabled self.days = days -class SignedIdentifier(Model): +class SignedIdentifier(msrest.serialization.Model): """signed identifier. All required parameters must be populated in order to send to Azure. - :param id: Required. a unique id + :param id: Required. a unique id. :type id: str - :param access_policy: The access policy + :param access_policy: The access policy. :type access_policy: ~azure.storage.queue.models.AccessPolicy """ @@ -534,19 +587,23 @@ class SignedIdentifier(Model): } _attribute_map = { - 'id': {'key': 'Id', 'type': 'str', 'xml': {'name': 'Id'}}, - 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy', 'xml': {'name': 'AccessPolicy'}}, - } - _xml_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'access_policy': {'key': 'AccessPolicy', 'type': 'AccessPolicy'}, } - def __init__(self, *, id: str, access_policy=None, **kwargs) -> None: + def __init__( + self, + *, + id: str, + access_policy: Optional["AccessPolicy"] = None, + **kwargs + ): super(SignedIdentifier, self).__init__(**kwargs) self.id = id self.access_policy = access_policy -class StorageError(Model): +class StorageError(msrest.serialization.Model): """StorageError. :param message: @@ -554,57 +611,50 @@ class StorageError(Model): """ _attribute_map = { - 'message': {'key': 'Message', 'type': 'str', 'xml': {'name': 'Message'}}, - } - _xml_map = { + 'message': {'key': 'Message', 'type': 'str'}, } - def __init__(self, *, message: str=None, **kwargs) -> None: + def __init__( + self, + *, + message: Optional[str] = None, + **kwargs + ): super(StorageError, self).__init__(**kwargs) self.message = message -class StorageErrorException(HttpResponseError): - """Server responsed with exception of type: 'StorageError'. - - :param deserialize: A deserializer - :param response: Server response to be deserialized. - """ - - def __init__(self, response, deserialize, *args): - - model_name = 'StorageError' - self.error = deserialize(model_name, response) - if self.error is None: - self.error = deserialize.dependencies[model_name]() - super(StorageErrorException, self).__init__(response=response) - - -class StorageServiceProperties(Model): +class StorageServiceProperties(msrest.serialization.Model): """Storage Service Properties. - :param logging: Azure Analytics Logging settings + :param logging: Azure Analytics Logging settings. 
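# Stored-access-policy sketch with the models above: AccessPolicy fields are
# plain optional strings in this generation, and SignedIdentifier requires an
# id. The set_access_policy parameter name below follows the usual autorest
# naming and is an assumption, as are the times and permission string.
from azure.multiapi.storagev2.queue.v2018_03_28._generated.models import (
    AccessPolicy, SignedIdentifier)

identifier = SignedIdentifier(
    id="read-only-policy",
    access_policy=AccessPolicy(
        start="2021-01-01T00:00:00Z",
        expiry="2021-02-01T00:00:00Z",
        permission="r",
    ),
)
# client.queue.set_access_policy(queue_acl=[identifier])  # sync flavor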
:type logging: ~azure.storage.queue.models.Logging - :param hour_metrics: A summary of request statistics grouped by API in - hourly aggregates for queues + :param hour_metrics: A summary of request statistics grouped by API in hourly aggregates for + queues. :type hour_metrics: ~azure.storage.queue.models.Metrics - :param minute_metrics: a summary of request statistics grouped by API in - minute aggregates for queues + :param minute_metrics: a summary of request statistics grouped by API in minute aggregates for + queues. :type minute_metrics: ~azure.storage.queue.models.Metrics :param cors: The set of CORS rules. :type cors: list[~azure.storage.queue.models.CorsRule] """ _attribute_map = { - 'logging': {'key': 'Logging', 'type': 'Logging', 'xml': {'name': 'Logging'}}, - 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics', 'xml': {'name': 'HourMetrics'}}, - 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics', 'xml': {'name': 'MinuteMetrics'}}, - 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'name': 'Cors', 'itemsName': 'CorsRule', 'wrapped': True}}, - } - _xml_map = { - } - - def __init__(self, *, logging=None, hour_metrics=None, minute_metrics=None, cors=None, **kwargs) -> None: + 'logging': {'key': 'Logging', 'type': 'Logging'}, + 'hour_metrics': {'key': 'HourMetrics', 'type': 'Metrics'}, + 'minute_metrics': {'key': 'MinuteMetrics', 'type': 'Metrics'}, + 'cors': {'key': 'Cors', 'type': '[CorsRule]', 'xml': {'wrapped': True}}, + } + + def __init__( + self, + *, + logging: Optional["Logging"] = None, + hour_metrics: Optional["Metrics"] = None, + minute_metrics: Optional["Metrics"] = None, + cors: Optional[List["CorsRule"]] = None, + **kwargs + ): super(StorageServiceProperties, self).__init__(**kwargs) self.logging = logging self.hour_metrics = hour_metrics @@ -612,20 +662,22 @@ def __init__(self, *, logging=None, hour_metrics=None, minute_metrics=None, cors self.cors = cors -class StorageServiceStats(Model): +class StorageServiceStats(msrest.serialization.Model): """Stats for the storage service. - :param geo_replication: Geo-Replication information for the Secondary - Storage Service + :param geo_replication: Geo-Replication information for the Secondary Storage Service. :type geo_replication: ~azure.storage.queue.models.GeoReplication """ _attribute_map = { - 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication', 'xml': {'name': 'GeoReplication'}}, - } - _xml_map = { + 'geo_replication': {'key': 'GeoReplication', 'type': 'GeoReplication'}, } - def __init__(self, *, geo_replication=None, **kwargs) -> None: + def __init__( + self, + *, + geo_replication: Optional["GeoReplication"] = None, + **kwargs + ): super(StorageServiceStats, self).__init__(**kwargs) self.geo_replication = geo_replication diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/__init__.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/__init__.py index d600f52..c0abe55 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/__init__.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/__init__.py @@ -1,12 +1,9 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. 
# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from ._service_operations import ServiceOperations diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_message_id_operations.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_message_id_operations.py index 8be7a24..0d1a4a7 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_message_id_operations.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_message_id_operations.py @@ -1,184 +1,211 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from azure.core.exceptions import map_error +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MessageIdOperations(object): """MessageIdOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.queue.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config - def update(self, pop_receipt, visibilitytimeout, queue_message=None, timeout=None, request_id=None, cls=None, **kwargs): - """The Update operation was introduced with version 2011-08-18 of the - Queue service API. The Update Message operation updates the visibility - timeout of a message. You can also use this operation to update the - contents of a message. A message must be in a format that can be - included in an XML request with UTF-8 encoding, and the encoded message - can be up to 64KB in size. - - :param pop_receipt: Required. 
Specifies the valid pop receipt value - returned from an earlier call to the Get Messages or Update Message - operation. + def update( + self, + pop_receipt, # type: str + visibilitytimeout, # type: int + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + queue_message=None, # type: Optional["_models.QueueMessage"] + **kwargs # type: Any + ): + # type: (...) -> None + """The Update operation was introduced with version 2011-08-18 of the Queue service API. The + Update Message operation updates the visibility timeout of a message. You can also use this + operation to update the contents of a message. A message must be in a format that can be + included in an XML request with UTF-8 encoding, and the encoded message can be up to 64KB in + size. + + :param pop_receipt: Required. Specifies the valid pop receipt value returned from an earlier + call to the Get Messages or Update Message operation. :type pop_receipt: str - :param visibilitytimeout: Optional. Specifies the new visibility - timeout value, in seconds, relative to server time. The default value - is 30 seconds. A specified value must be larger than or equal to 1 - second, and cannot be larger than 7 days, or larger than 2 hours on - REST protocol versions prior to version 2011-08-18. The visibility - timeout of a message can be set to a value later than the expiry time. + :param visibilitytimeout: Optional. Specifies the new visibility timeout value, in seconds, + relative to server time. The default value is 30 seconds. A specified value must be larger than + or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol + versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value + later than the expiry time. :type visibilitytimeout: int - :param queue_message: A Message object which can be stored in a Queue - :type queue_message: ~azure.storage.queue.models.QueueMessage - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see Setting Timeouts for Queue Service Operations.. :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :param queue_message: A Message object which can be stored in a Queue. 
+ :type queue_message: ~azure.storage.queue.models.QueueMessage + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) :rtype: None - :raises: - :class:`StorageErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ - error_map = kwargs.pop('error_map', None) + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + # Construct URL - url = self.update.metadata['url'] + url = self.update.metadata['url'] # type: ignore path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] query_parameters['popreceipt'] = self._serialize.query("pop_receipt", pop_receipt, 'str') query_parameters['visibilitytimeout'] = self._serialize.query("visibilitytimeout", visibilitytimeout, 'int', maximum=604800, minimum=0) if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters = {} # type: Dict[str, Any] header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct body + body_content_kwargs = {} # type: Dict[str, Any] if queue_message is not None: - body_content = self._serialize.body(queue_message, 'QueueMessage') + body_content = self._serialize.body(queue_message, 'QueueMessage', is_xml=True) else: body_content = None - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + 
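# The two response headers read next are the ones callers need after an Update:
# x-ms-popreceipt carries the new pop receipt that must be used for any further
# Update or Delete of this message, and x-ms-time-next-visible reports when the
# message becomes visible in the queue again.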
response_headers['x-ms-popreceipt']=self._deserialize('str', response.headers.get('x-ms-popreceipt')) + response_headers['x-ms-time-next-visible']=self._deserialize('rfc-1123', response.headers.get('x-ms-time-next-visible')) if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-popreceipt': self._deserialize('str', response.headers.get('x-ms-popreceipt')), - 'x-ms-time-next-visible': self._deserialize('rfc-1123', response.headers.get('x-ms-time-next-visible')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - update.metadata = {'url': '/{queueName}/messages/{messageid}'} - - def delete(self, pop_receipt, timeout=None, request_id=None, cls=None, **kwargs): + return cls(pipeline_response, None, response_headers) + + update.metadata = {'url': '/{queueName}/messages/{messageid}'} # type: ignore + + def delete( + self, + pop_receipt, # type: str + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None """The Delete operation deletes the specified message. - :param pop_receipt: Required. Specifies the valid pop receipt value - returned from an earlier call to the Get Messages or Update Message - operation. + :param pop_receipt: Required. Specifies the valid pop receipt value returned from an earlier + call to the Get Messages or Update Message operation. :type pop_receipt: str - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see Setting Timeouts for Queue Service Operations.. :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) :rtype: None - :raises: - :class:`StorageErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ - error_map = kwargs.pop('error_map', None) + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/xml" + # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] query_parameters['popreceipt'] = self._serialize.query("pop_receipt", pop_receipt, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) # Construct headers - header_parameters = {} + header_parameters = {} # type: Dict[str, Any] header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{queueName}/messages/{messageid}'} + return cls(pipeline_response, None, response_headers) + + delete.metadata = {'url': '/{queueName}/messages/{messageid}'} # type: ignore diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_messages_operations.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_messages_operations.py index ccd00ce..0849705 100644 --- 
a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_messages_operations.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_messages_operations.py @@ -1,85 +1,98 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from azure.core.exceptions import map_error +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class MessagesOperations(object): """MessagesOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.queue.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar peekonly: . Constant value: "true". """ - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config - self.peekonly = "true" - def dequeue(self, number_of_messages=None, visibilitytimeout=None, timeout=None, request_id=None, cls=None, **kwargs): - """The Dequeue operation retrieves one or more messages from the front of - the queue. - - :param number_of_messages: Optional. A nonzero integer value that - specifies the number of messages to retrieve from the queue, up to a - maximum of 32. If fewer are visible, the visible messages are - returned. By default, a single message is retrieved from the queue - with this operation. + def dequeue( + self, + number_of_messages=None, # type: Optional[int] + visibilitytimeout=None, # type: Optional[int] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> List["_models.DequeuedMessageItem"] + """The Dequeue operation retrieves one or more messages from the front of the queue. + + :param number_of_messages: Optional. A nonzero integer value that specifies the number of + messages to retrieve from the queue, up to a maximum of 32. 
If fewer are visible, the visible + messages are returned. By default, a single message is retrieved from the queue with this + operation. :type number_of_messages: int - :param visibilitytimeout: Optional. Specifies the new visibility - timeout value, in seconds, relative to server time. The default value - is 30 seconds. A specified value must be larger than or equal to 1 - second, and cannot be larger than 7 days, or larger than 2 hours on - REST protocol versions prior to version 2011-08-18. The visibility - timeout of a message can be set to a value later than the expiry time. + :param visibilitytimeout: Optional. Specifies the new visibility timeout value, in seconds, + relative to server time. The default value is 30 seconds. A specified value must be larger than + or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol + versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value + later than the expiry time. :type visibilitytimeout: int - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see Setting Timeouts for Queue Service Operations.. :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of DequeuedMessageItem, or the result of cls(response) :rtype: list[~azure.storage.queue.models.DequeuedMessageItem] - :raises: - :class:`StorageErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ - error_map = kwargs.pop('error_map', None) + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.DequeuedMessageItem"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/xml" + # Construct URL - url = self.dequeue.metadata['url'] + url = self.dequeue.metadata['url'] # type: ignore path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] if number_of_messages is not None: query_parameters['numofmessages'] = self._serialize.query("number_of_messages", number_of_messages, 'int', minimum=1) if visibilitytimeout is not None: @@ -88,145 +101,159 @@ def dequeue(self, number_of_messages=None, visibilitytimeout=None, timeout=None, query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' + header_parameters = {} # type: Dict[str, Any] header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - 
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[DequeuedMessageItem]', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('[DequeuedMessageItem]', pipeline_response) if cls: - return cls(response, deserialized, header_dict) + return cls(pipeline_response, deserialized, response_headers) return deserialized - dequeue.metadata = {'url': '/{queueName}/messages'} - - def clear(self, timeout=None, request_id=None, cls=None, **kwargs): + dequeue.metadata = {'url': '/{queueName}/messages'} # type: ignore + + def clear( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None """The Clear operation deletes all messages from the specified queue. - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see Setting Timeouts for Queue Service Operations.. :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) :rtype: None - :raises: - :class:`StorageErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ - error_map = kwargs.pop('error_map', None) + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/xml" + # Construct URL - url = self.clear.metadata['url'] + url = self.clear.metadata['url'] # type: ignore path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) # Construct headers - header_parameters = {} + header_parameters = {} # type: Dict[str, Any] header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - clear.metadata = {'url': '/{queueName}/messages'} - - def enqueue(self, queue_message=None, visibilitytimeout=None, message_time_to_live=None, timeout=None, request_id=None, cls=None, **kwargs): - """The Enqueue operation adds a new message to the back of the message - queue. A visibility timeout can also be specified to make the message - invisible until the visibility timeout expires. A message must be in a - format that can be included in an XML request with UTF-8 encoding. 
The - encoded message can be up to 64 KB in size for versions 2011-08-18 and - newer, or 8 KB in size for previous versions. - - :param queue_message: A Message object which can be stored in a Queue + return cls(pipeline_response, None, response_headers) + + clear.metadata = {'url': '/{queueName}/messages'} # type: ignore + + def enqueue( + self, + queue_message, # type: "_models.QueueMessage" + visibilitytimeout=None, # type: Optional[int] + message_time_to_live=None, # type: Optional[int] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> List["_models.EnqueuedMessage"] + """The Enqueue operation adds a new message to the back of the message queue. A visibility timeout + can also be specified to make the message invisible until the visibility timeout expires. A + message must be in a format that can be included in an XML request with UTF-8 encoding. The + encoded message can be up to 64 KB in size for versions 2011-08-18 and newer, or 8 KB in size + for previous versions. + + :param queue_message: A Message object which can be stored in a Queue. :type queue_message: ~azure.storage.queue.models.QueueMessage - :param visibilitytimeout: Optional. Specifies the new visibility - timeout value, in seconds, relative to server time. The default value - is 30 seconds. A specified value must be larger than or equal to 1 - second, and cannot be larger than 7 days, or larger than 2 hours on - REST protocol versions prior to version 2011-08-18. The visibility - timeout of a message can be set to a value later than the expiry time. + :param visibilitytimeout: Optional. If specified, the request must be made using an x-ms- + version of 2011-08-18 or later. If not specified, the default value is 0. Specifies the new + visibility timeout value, in seconds, relative to server time. The new value must be larger + than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message + cannot be set to a value later than the expiry time. visibilitytimeout should be set to a value + smaller than the time-to-live value. :type visibilitytimeout: int - :param message_time_to_live: Optional. Specifies the time-to-live - interval for the message, in seconds. Prior to version 2017-07-29, the - maximum time-to-live allowed is 7 days. For version 2017-07-29 or - later, the maximum time-to-live can be any positive number, as well as - -1 indicating that the message does not expire. If this parameter is - omitted, the default time-to-live is 7 days. + :param message_time_to_live: Optional. Specifies the time-to-live interval for the message, in + seconds. Prior to version 2017-07-29, the maximum time-to-live allowed is 7 days. For version + 2017-07-29 or later, the maximum time-to-live can be any positive number, as well as -1 + indicating that the message does not expire. If this parameter is omitted, the default time-to- + live is 7 days. :type message_time_to_live: int - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see Setting Timeouts for Queue Service Operations.. :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. 
- :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of EnqueuedMessage, or the result of cls(response) :rtype: list[~azure.storage.queue.models.EnqueuedMessage] - :raises: - :class:`StorageErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ - error_map = kwargs.pop('error_map', None) + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.EnqueuedMessage"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" + # Construct URL - url = self.enqueue.metadata['url'] + url = self.enqueue.metadata['url'] # type: ignore path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] if visibilitytimeout is not None: query_parameters['visibilitytimeout'] = self._serialize.query("visibilitytimeout", visibilitytimeout, 'int', maximum=604800, minimum=0) if message_time_to_live is not None: @@ -235,116 +262,112 @@ def enqueue(self, queue_message=None, visibilitytimeout=None, message_time_to_li query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters = {} # type: Dict[str, Any] header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - if queue_message is not None: - body_content = self._serialize.body(queue_message, 'QueueMessage') - else: - body_content = None - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(queue_message, 'QueueMessage', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise 
models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 201: - deserialized = self._deserialize('[EnqueuedMessage]', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('[EnqueuedMessage]', pipeline_response) if cls: - return cls(response, deserialized, header_dict) + return cls(pipeline_response, deserialized, response_headers) return deserialized - enqueue.metadata = {'url': '/{queueName}/messages'} - - def peek(self, number_of_messages=None, timeout=None, request_id=None, cls=None, **kwargs): - """The Peek operation retrieves one or more messages from the front of the - queue, but does not alter the visibility of the message. - - :param number_of_messages: Optional. A nonzero integer value that - specifies the number of messages to retrieve from the queue, up to a - maximum of 32. If fewer are visible, the visible messages are - returned. By default, a single message is retrieved from the queue - with this operation. + enqueue.metadata = {'url': '/{queueName}/messages'} # type: ignore + + def peek( + self, + number_of_messages=None, # type: Optional[int] + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> List["_models.PeekedMessageItem"] + """The Peek operation retrieves one or more messages from the front of the queue, but does not + alter the visibility of the message. + + :param number_of_messages: Optional. A nonzero integer value that specifies the number of + messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible + messages are returned. By default, a single message is retrieved from the queue with this + operation. :type number_of_messages: int - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see Setting Timeouts for Queue Service Operations.. :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: list or the result of cls(response) + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of PeekedMessageItem, or the result of cls(response) :rtype: list[~azure.storage.queue.models.PeekedMessageItem] - :raises: - :class:`StorageErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ - error_map = kwargs.pop('error_map', None) + cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PeekedMessageItem"]] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + peekonly = "true" + accept = "application/xml" + # Construct URL - url = self.peek.metadata['url'] + url = self.peek.metadata['url'] # type: ignore path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] + query_parameters['peekonly'] = self._serialize.query("peekonly", peekonly, 'str') if number_of_messages is not None: query_parameters['numofmessages'] = self._serialize.query("number_of_messages", number_of_messages, 'int', minimum=1) if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['peekonly'] = self._serialize.query("self.peekonly", self.peekonly, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' + header_parameters = {} # type: Dict[str, Any] header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('[PeekedMessageItem]', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', 
response.headers.get('Date')) + deserialized = self._deserialize('[PeekedMessageItem]', pipeline_response) if cls: - return cls(response, deserialized, header_dict) + return cls(pipeline_response, deserialized, response_headers) return deserialized - peek.metadata = {'url': '/{queueName}/messages'} + peek.metadata = {'url': '/{queueName}/messages'} # type: ignore diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_queue_operations.py b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_queue_operations.py index 02618d6..784057f 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_queue_operations.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_generated/operations/_queue_operations.py @@ -1,432 +1,491 @@ # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# +# Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings -from azure.core.exceptions import map_error +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse -from .. import models +from .. import models as _models +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class QueueOperations(object): """QueueOperations operations. - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + :ivar models: Alias to model classes used in this operation group. + :type models: ~azure.storage.queue.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ - models = models + models = _models def __init__(self, client, config, serializer, deserializer): - self._client = client self._serialize = serializer self._deserialize = deserializer - self._config = config - def create(self, timeout=None, metadata=None, request_id=None, cls=None, **kwargs): + def create( + self, + timeout=None, # type: Optional[int] + metadata=None, # type: Optional[str] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None """creates a new queue under the given account. - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see Setting Timeouts for Queue Service Operations.. :type timeout: int - :param metadata: Optional. 
Include this parameter to specify that the - queue's metadata be returned as part of the response body. Note that - metadata requested with this parameter must be stored in accordance - with the naming restrictions imposed by the 2009-09-19 version of the - Queue service. Beginning with this version, all metadata names must - adhere to the naming conventions for C# identifiers. + :param metadata: Optional. Include this parameter to specify that the queue's metadata be + returned as part of the response body. Note that metadata requested with this parameter must be + stored in accordance with the naming restrictions imposed by the 2009-09-19 version of the + Queue service. Beginning with this version, all metadata names must adhere to the naming + conventions for C# identifiers. :type metadata: str - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) :rtype: None - :raises: - :class:`StorageErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ - error_map = kwargs.pop('error_map', None) + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/xml" + # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) # Construct headers - header_parameters = {} + header_parameters = {} # type: Dict[str, Any] if metadata is not None: header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str') header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.put(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [201, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise 
models.StorageErrorException(response, self._deserialize) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 201: + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + + if response.status_code == 204: + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - create.metadata = {'url': '/{queueName}'} - - def delete(self, timeout=None, request_id=None, cls=None, **kwargs): + return cls(pipeline_response, None, response_headers) + + create.metadata = {'url': '/{queueName}'} # type: ignore + + def delete( + self, + timeout=None, # type: Optional[int] + request_id_parameter=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None """operation permanently deletes the specified queue. - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see Setting Timeouts for Queue Service Operations.. :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+ :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) :rtype: None - :raises: - :class:`StorageErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ - error_map = kwargs.pop('error_map', None) + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + accept = "application/xml" + # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) # Construct headers - header_parameters = {} + header_parameters = {} # type: Dict[str, Any] header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - delete.metadata = {'url': '/{queueName}'} - - def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs): - """Retrieves user-defined metadata and queue properties on the specified - queue. Metadata is associated with the queue as name-values pairs. - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see Setting Timeouts for Queue Service Operations.. + """sets stored access policies for the queue that may be used with Shared Access Signatures. + :param timeout: The The timeout parameter is expressed in seconds.
For more information, see Setting Timeouts for Queue Service Operations.. + """Sets properties for a storage account's Queue service endpoint, including properties for + Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. :param storage_service_properties: The StorageService properties. - :type storage_service_properties: - ~azure.storage.queue.models.StorageServiceProperties - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see Setting Timeouts for Queue Service Operations.. :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: None or the result of cls(response) + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None, or the result of cls(response) :rtype: None - :raises: - :class:`StorageErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ - error_map = kwargs.pop('error_map', None) + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) + restype = "service" comp = "properties" + content_type = kwargs.pop("content_type", "application/xml") + accept = "application/xml" # Construct URL - url = self.set_properties.metadata['url'] + url = self.set_properties.metadata['url'] # type: ignore path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['restype'] = self._serialize.query("self.restype", self.restype, 'str') - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters = {} # type: Dict[str, Any] header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') - - # Construct body - body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') +
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') + + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True) + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) if cls: - response_headers = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } - return cls(response, None, response_headers) - set_properties.metadata = {'url': '/'} - - def get_properties(self, timeout=None, request_id=None, cls=None, **kwargs): - """gets the properties of a storage account's Queue service, including - properties for Storage Analytics and CORS (Cross-Origin Resource - Sharing) rules. - - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see "_models.ListQueuesSegmentResponse" + """The List Queues Segment operation returns a list of the queues under the specified account. + + :param prefix: Filters the results to return only queues whose name begins with the specified + prefix. :type prefix: str - :param marker: A string value that identifies the portion of the list - of queues to be returned with the next listing operation. The - operation returns the NextMarker value within the response body if the - listing operation did not return all queues remaining to be listed - with the current page. The NextMarker value can be used as the value - for the marker parameter in a subsequent call to request the next page - of list items. The marker value is opaque to the client. + :param marker: A string value that identifies the portion of the list of queues to be returned + with the next listing operation. The operation returns the NextMarker value within the response + body if the listing operation did not return all queues remaining to be listed with the current + page. The NextMarker value can be used as the value for the marker parameter in a subsequent + call to request the next page of list items. The marker value is opaque to the client. :type marker: str - :param maxresults: Specifies the maximum number of queues to return. - If the request does not specify maxresults, or specifies a value - greater than 5000, the server will return up to 5000 items. Note that - if the listing operation crosses a partition boundary, then the - service will return a continuation token for retrieving the remainder - of the results. For this reason, it is possible that the service will - return fewer results than specified by maxresults, or than the default - of 5000. 
+ :param maxresults: Specifies the maximum number of queues to return. If the request does not + specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 + items. Note that if the listing operation crosses a partition boundary, then the service will + return a continuation token for retrieving the remainder of the results. For this reason, it is + possible that the service will return fewer results than specified by maxresults, or than the + default of 5000. :type maxresults: int - :param include: Include this parameter to specify that the queues's - metadata be returned as part of the response body. - :type include: list[str or - ~azure.storage.queue.models.ListQueuesIncludeType] - :param timeout: The The timeout parameter is expressed in seconds. For - more information, see Setting Timeouts for Queue Service Operations.. :type timeout: int - :param request_id: Provides a client-generated, opaque value with a 1 - KB character limit that is recorded in the analytics logs when storage - analytics logging is enabled. - :type request_id: str - :param callable cls: A custom type or function that will be passed the - direct response - :return: ListQueuesSegmentResponse or the result of cls(response) + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. + :type request_id_parameter: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListQueuesSegmentResponse, or the result of cls(response) :rtype: ~azure.storage.queue.models.ListQueuesSegmentResponse - :raises: - :class:`StorageErrorException` + :raises: ~azure.core.exceptions.HttpResponseError """ - error_map = kwargs.pop('error_map', None) + cls = kwargs.pop('cls', None) # type: ClsType["_models.ListQueuesSegmentResponse"] + error_map = { + 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError + } + error_map.update(kwargs.pop('error_map', {})) comp = "list" + accept = "application/xml" # Construct URL - url = self.list_queues_segment.metadata['url'] + url = self.list_queues_segment.metadata['url'] # type: ignore path_format_arguments = { - 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters - query_parameters = {} + query_parameters = {} # type: Dict[str, Any] + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') if prefix is not None: query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str') if marker is not None: @@ -310,40 +336,34 @@ def list_queues_segment(self, prefix=None, marker=None, maxresults=None, include if maxresults is not None: query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) if include is not None: - query_parameters['include'] = self._serialize.query("include", include, '[ListQueuesIncludeType]', div=',') + query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/xml' + 
header_parameters = {} # type: Dict[str, Any] header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') - if request_id is not None: - header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if request_id_parameter is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str') + header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - # Construct and send request request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.StorageErrorException(response, self._deserialize) - - header_dict = {} - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('ListQueuesSegmentResponse', response) - header_dict = { - 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), - 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), - 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), - 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), - } + error = self._deserialize(_models.StorageError, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id')) + response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version')) + response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date')) + deserialized = self._deserialize('ListQueuesSegmentResponse', pipeline_response) if cls: - return cls(response, deserialized, header_dict) + return cls(pipeline_response, deserialized, response_headers) return deserialized - list_queues_segment.metadata = {'url': '/'} + list_queues_segment.metadata = {'url': '/'} # type: ignore diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_message_encoding.py b/azure/multiapi/storagev2/queue/v2018_03_28/_message_encoding.py index 52b6439..e9b4f88 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_message_encoding.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_message_encoding.py @@ -122,6 +122,7 @@ class BinaryBase64DecodePolicy(MessageDecodePolicy): """ def decode(self, content, response): + response = response.http_response try: return b64decode(content.encode('utf-8')) except (ValueError, TypeError) as error: diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_models.py b/azure/multiapi/storagev2/queue/v2018_03_28/_models.py index 24d3b4b..943de5c 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_models.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_models.py @@ -7,10 +7,10 @@ # pylint: disable=super-init-not-called from typing import List # pylint: disable=unused-import +from azure.core.exceptions import HttpResponseError from azure.core.paging import PageIterator from ._shared.response_handlers import return_context_and_deserialized, process_storage_error from ._shared.models import DictMixin -from ._generated.models import StorageErrorException from ._generated.models import AccessPolicy as GenAccessPolicy from ._generated.models import Logging as GeneratedLogging 
from ._generated.models import Metrics as GeneratedMetrics @@ -279,7 +279,7 @@ def __init__(self, command, results_per_page=None, continuation_token=None): def _get_next_cb(self, continuation_token): try: return self._command(number_of_messages=self.results_per_page) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) def _extract_data_cb(self, messages): # pylint: disable=no-self-use @@ -349,7 +349,7 @@ def _get_next_cb(self, continuation_token): maxresults=self.results_per_page, cls=return_context_and_deserialized, use_location=self.location_mode) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) def _extract_data_cb(self, get_next_return): diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_queue_client.py b/azure/multiapi/storagev2/queue/v2018_03_28/_queue_client.py index 7bebbdb..c1e33aa 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_queue_client.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_queue_client.py @@ -16,6 +16,7 @@ import six +from azure.core.exceptions import HttpResponseError from azure.core.paging import ItemPaged from azure.core.tracing.decorator import distributed_trace from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query @@ -26,8 +27,8 @@ return_headers_and_deserialized) from ._message_encoding import NoEncodePolicy, NoDecodePolicy from ._deserialize import deserialize_queue_properties, deserialize_queue_creation -from ._generated import AzureQueueStorage, VERSION -from ._generated.models import StorageErrorException, SignedIdentifier +from ._generated import AzureQueueStorage +from ._generated.models import SignedIdentifier from ._generated.models import QueueMessage as GenQueueMessage from ._models import QueueMessage, AccessPolicy, MessagesPaged @@ -40,6 +41,10 @@ class QueueClient(StorageAccountHostsMixin): """A client to interact with a specific Queue. + For more optional configuration, please click + `here `_. + :param str account_url: The URL to the storage account. In order to create a client given the full URI to the queue, use the :func:`from_queue_url` classmethod. 
@@ -100,7 +105,8 @@ def __init__( self._config.message_encode_policy = kwargs.get('message_encode_policy', None) or NoEncodePolicy() self._config.message_decode_policy = kwargs.get('message_decode_policy', None) or NoDecodePolicy() self._client = AzureQueueStorage(self.url, pipeline=self._pipeline) - self._client._config.version = kwargs.get('api_version', VERSION) # pylint: disable=protected-access + default_api_version = self._client._config.version # pylint: disable=protected-access + self._client._config.version = kwargs.get('api_version', default_api_version) # pylint: disable=protected-access def _format_url(self, hostname): """Format the endpoint URL according to the current location @@ -229,7 +235,7 @@ def create_queue(self, **kwargs): headers=headers, cls=deserialize_queue_creation, **kwargs) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -261,7 +267,7 @@ def delete_queue(self, **kwargs): timeout = kwargs.pop('timeout', None) try: self._client.queue.delete(timeout=timeout, **kwargs) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -291,7 +297,7 @@ def get_queue_properties(self, **kwargs): timeout=timeout, cls=deserialize_queue_properties, **kwargs) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) response.name = self.queue_name return response # type: ignore @@ -328,7 +334,7 @@ def set_queue_metadata(self, metadata=None, **kwargs): headers=headers, cls=return_response_headers, **kwargs) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -348,7 +354,7 @@ def get_queue_access_policy(self, **kwargs): timeout=timeout, cls=return_headers_and_deserialized, **kwargs) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) return {s.id: s.access_policy or AccessPolicy() for s in identifiers} @@ -403,7 +409,7 @@ def set_queue_access_policy(self, signed_identifiers, **kwargs): queue_acl=signed_identifiers or None, timeout=timeout, **kwargs) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -481,7 +487,7 @@ def send_message( # type: ignore queue_message.pop_receipt = enqueued[0].pop_receipt queue_message.next_visible_on = enqueued[0].time_next_visible return queue_message - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -537,7 +543,7 @@ def receive_message(self, **kwargs): wrapped_message = QueueMessage._from_generated( # pylint: disable=protected-access message[0]) if message != [] else None return wrapped_message - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -608,7 +614,7 @@ def receive_messages(self, **kwargs): **kwargs ) return ItemPaged(command, results_per_page=messages_per_page, page_iterator_class=MessagesPaged) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -705,7 +711,7 @@ def update_message(self, message, pop_receipt=None, content=None, **kwargs): new_message.pop_receipt = response['popreceipt'] new_message.next_visible_on = response['time_next_visible'] return new_message - except StorageErrorException as error: + 
except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -763,7 +769,7 @@ def peek_messages(self, max_messages=None, **kwargs): for peeked in messages: wrapped_messages.append(QueueMessage._from_generated(peeked)) # pylint: disable=protected-access return wrapped_messages - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -786,7 +792,7 @@ def clear_messages(self, **kwargs): timeout = kwargs.pop('timeout', None) try: self._client.messages.clear(timeout=timeout, **kwargs) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -839,5 +845,5 @@ def delete_message(self, message, pop_receipt=None, **kwargs): queue_message_id=message_id, **kwargs ) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_queue_service_client.py b/azure/multiapi/storagev2/queue/v2018_03_28/_queue_service_client.py index 9a67862..b6f8a64 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_queue_service_client.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_queue_service_client.py @@ -13,14 +13,15 @@ except ImportError: from urlparse import urlparse # type: ignore +from azure.core.exceptions import HttpResponseError from azure.core.paging import ItemPaged from azure.core.pipeline import Pipeline from azure.core.tracing.decorator import distributed_trace from ._shared.models import LocationMode from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query from ._shared.response_handlers import process_storage_error -from ._generated import AzureQueueStorage, VERSION -from ._generated.models import StorageServiceProperties, StorageErrorException +from ._generated import AzureQueueStorage +from ._generated.models import StorageServiceProperties from ._models import ( QueuePropertiesPaged, @@ -50,6 +51,10 @@ class QueueServiceClient(StorageAccountHostsMixin): For operations relating to a specific queue, a client for this entity can be retrieved using the :func:`~get_queue_client` function. + For more optional configuration, please click + `here `_. + :param str account_url: The URL to the queue service endpoint. Any other entities included in the URL path (e.g. queue) will be discarded. 
This URL can be optionally @@ -103,7 +108,8 @@ def __init__( self._query_str, credential = self._format_query_string(sas_token, credential) super(QueueServiceClient, self).__init__(parsed_url, service='queue', credential=credential, **kwargs) self._client = AzureQueueStorage(self.url, pipeline=self._pipeline) - self._client._config.version = kwargs.get('api_version', VERSION) # pylint: disable=protected-access + default_api_version = self._client._config.version # pylint: disable=protected-access + self._client._config.version = kwargs.get('api_version', default_api_version) # pylint: disable=protected-access def _format_url(self, hostname): """Format the endpoint URL according to the current location @@ -176,7 +182,7 @@ def get_service_stats(self, **kwargs): stats = self._client.service.get_statistics( # type: ignore timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) return service_stats_deserialize(stats) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -204,7 +210,7 @@ def get_service_properties(self, **kwargs): try: service_props = self._client.service.get_properties(timeout=timeout, **kwargs) # type: ignore return service_properties_deserialize(service_props) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -260,7 +266,7 @@ def set_service_properties( # type: ignore ) try: return self._client.service.set_properties(props, timeout=timeout, **kwargs) # type: ignore - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py index e8788ec..5e524b2 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client.py @@ -3,19 +3,13 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
# -------------------------------------------------------------------------- - +import logging +import uuid from typing import ( # pylint: disable=unused-import - Union, Optional, Any, - Iterable, - Dict, - List, - Type, Tuple, - TYPE_CHECKING, ) -import logging try: from urllib.parse import parse_qs, quote @@ -38,13 +32,14 @@ DistributedTracingPolicy, HttpLoggingPolicy, UserAgentPolicy, - AzureSasCredentialPolicy, + AzureSasCredentialPolicy ) from .constants import STORAGE_OAUTH_SCOPE, SERVICE_HOST_BASE, CONNECTION_TIMEOUT, READ_TIMEOUT from .models import LocationMode from .authentication import SharedKeyCredentialPolicy from .shared_access_signature import QueryStringConstants +from .request_handlers import serialize_batch_body, _get_batch_request_delimiter from .policies import ( StorageHeadersPolicy, StorageContentValidation, @@ -56,19 +51,17 @@ ExponentialRetry, ) from .._version import VERSION -from .._generated.models import StorageErrorException from .response_handlers import process_storage_error, PartialBatchErrorException _LOGGER = logging.getLogger(__name__) _SERVICE_PARAMS = { - "blob": {"primary": "BlobEndpoint", "secondary": "BlobSecondaryEndpoint"}, - "queue": {"primary": "QueueEndpoint", "secondary": "QueueSecondaryEndpoint"}, - "file": {"primary": "FileEndpoint", "secondary": "FileSecondaryEndpoint"}, - "dfs": {"primary": "BlobEndpoint", "secondary": "BlobEndpoint"}, + "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, + "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, + "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, + "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, } - class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes def __init__( self, @@ -263,33 +256,52 @@ def _create_pipeline(self, credential, **kwargs): return config, Pipeline(config.transport, policies=policies) def _batch_send( - self, *reqs, # type: HttpRequest + self, + *reqs, # type: HttpRequest **kwargs ): """Given a series of request, do a Storage batch call. 
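+ + Sub-requests are serialized into a single multipart/mixed body, delimited by a "batch_" + uuid boundary (see _get_batch_request_delimiter in request_handlers), and sent as one POST to the account's "?comp=batch" endpoint.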
""" # Pop it here, so requests doesn't feel bad about additional kwarg raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + batch_id = str(uuid.uuid1()) + request = self._client._client.post( # pylint: disable=protected-access - url='https://{}/?comp=batch'.format(self.primary_hostname), + url='{}://{}/{}?{}comp=batch{}{}'.format( + self.scheme, + self.primary_hostname, + kwargs.pop('path', ""), + kwargs.pop('restype', ""), + kwargs.pop('sas', ""), + kwargs.pop('timeout', "") + ), headers={ - 'x-ms-version': self.api_version + 'x-ms-version': self.api_version, + "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) } ) + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + request.set_multipart_mixed( *reqs, - policies=[ - StorageHeadersPolicy(), - self._credential_policy - ], + policies=policies, enforce_https=False ) + Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access + body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) + request.set_bytes_body(body) + + temp = request.multipart_mixed_info + request.multipart_mixed_info = None pipeline_response = self._pipeline.run( request, **kwargs ) response = pipeline_response.http_response + request.multipart_mixed_info = temp try: if response.status_code not in [202]: @@ -305,7 +317,7 @@ def _batch_send( raise error return iter(parts) return parts - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) class TransportWrapper(HttpTransport): @@ -351,15 +363,15 @@ def parse_connection_str(conn_str, credential, service): conn_settings = [s.split("=", 1) for s in conn_str.split(";")] if any(len(tup) != 2 for tup in conn_settings): raise ValueError("Connection string is either blank or malformed.") - conn_settings = dict(conn_settings) + conn_settings = dict((key.upper(), val) for key, val in conn_settings) endpoints = _SERVICE_PARAMS[service] primary = None secondary = None if not credential: try: - credential = {"account_name": conn_settings["AccountName"], "account_key": conn_settings["AccountKey"]} + credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} except KeyError: - credential = conn_settings.get("SharedAccessSignature") + credential = conn_settings.get("SHAREDACCESSSIGNATURE") if endpoints["primary"] in conn_settings: primary = conn_settings[endpoints["primary"]] if endpoints["secondary"] in conn_settings: @@ -369,13 +381,13 @@ def parse_connection_str(conn_str, credential, service): raise ValueError("Connection string specifies only secondary endpoint.") try: primary = "{}://{}.{}.{}".format( - conn_settings["DefaultEndpointsProtocol"], - conn_settings["AccountName"], + conn_settings["DEFAULTENDPOINTSPROTOCOL"], + conn_settings["ACCOUNTNAME"], service, - conn_settings["EndpointSuffix"], + conn_settings["ENDPOINTSUFFIX"], ) secondary = "{}-secondary.{}.{}".format( - conn_settings["AccountName"], service, conn_settings["EndpointSuffix"] + conn_settings["ACCOUNTNAME"], service, conn_settings["ENDPOINTSUFFIX"] ) except KeyError: pass @@ -383,7 +395,7 @@ def parse_connection_str(conn_str, credential, service): if not primary: try: primary = "https://{}.{}.{}".format( - conn_settings["AccountName"], service, conn_settings.get("EndpointSuffix", SERVICE_HOST_BASE) + conn_settings["ACCOUNTNAME"], service, conn_settings.get("ENDPOINTSUFFIX", SERVICE_HOST_BASE) ) except KeyError: 
raise ValueError("Connection string missing required connection details.") @@ -412,6 +424,9 @@ def create_configuration(**kwargs): # Page blob uploads config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) + # Datalake file uploads + config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) + # Blob downloads config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py index aea9b85..16eb6de 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/base_client_async.py @@ -35,7 +35,6 @@ ) from .policies_async import AsyncStorageResponseHook -from .._generated.models import StorageErrorException from .response_handlers import process_storage_error, PartialBatchErrorException if TYPE_CHECKING: @@ -155,7 +154,7 @@ async def _batch_send( raise error return AsyncList(parts_list) return parts - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/constants.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/constants.py index 7fb05b5..f2695b5 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/constants.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/constants.py @@ -5,10 +5,10 @@ # -------------------------------------------------------------------------- import sys -from .._generated.version import VERSION +from .._generated import AzureQueueStorage -X_MS_VERSION = VERSION +X_MS_VERSION = AzureQueueStorage(url="get_api_version")._config.version # pylint: disable=protected-access # Socket timeout in seconds CONNECTION_TIMEOUT = 20 diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/encryption.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/encryption.py index 62607cc..439db5b 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/encryption.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/encryption.py @@ -520,6 +520,7 @@ def decrypt_queue_message(message, response, require_encryption, key_encryption_ :return: The plain text message from the queue message. :rtype: str ''' + response = response.http_response try: message = loads(message) diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/request_handlers.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/request_handlers.py index 4f15b65..37354d7 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/request_handlers.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/request_handlers.py @@ -20,6 +20,10 @@ _LOGGER = logging.getLogger(__name__) +_REQUEST_DELIMITER_PREFIX = "batch_" +_HTTP1_1_IDENTIFIER = "HTTP/1.1" +_HTTP_LINE_ENDING = "\r\n" + def serialize_iso(attr): """Serialize Datetime object into ISO-8601 formatted string. @@ -145,3 +149,125 @@ def add_metadata_headers(metadata=None): for key, value in metadata.items(): headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value return headers + + +def serialize_batch_body(requests, batch_id): + """ + -- + + -- + (repeated as needed) + ---- + + Serializes the requests in this batch to a single HTTP mixed/multipart body. 
+ + :param list[~azure.core.pipeline.transport.HttpRequest] requests: + a list of sub-requests for the batch request + :param str batch_id: + The id to be embedded in the batch sub-request delimiter + :return: The body bytes for this batch. + """ + + if requests is None or len(requests) == 0: + raise ValueError('Please provide sub-request(s) for this batch request') + + delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') + newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') + batch_body = list() + + content_index = 0 + for request in requests: + request.headers.update({ + "Content-ID": str(content_index), + "Content-Length": str(0) + }) + batch_body.append(delimiter_bytes) + batch_body.append(_make_body_from_sub_request(request)) + batch_body.append(newline_bytes) + content_index += 1 + + batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) + # final line of body MUST have \r\n at the end, or it will not be properly read by the service + batch_body.append(newline_bytes) + + return bytes().join(batch_body) + + +def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): + """ + Gets the delimiter used for this batch request's mixed/multipart HTTP format. + + :param str batch_id: + Randomly generated id + :param bool is_prepend_dashes: + Whether to include the starting dashes. Used in the body, but not when defining the delimiter. + :param bool is_append_dashes: + Whether to include the ending dashes. Used in the body on the closing delimiter only. + :return: The delimiter, WITHOUT a trailing newline. + """ + + prepend_dashes = '--' if is_prepend_dashes else '' + append_dashes = '--' if is_append_dashes else '' + + return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes + + +def _make_body_from_sub_request(sub_request): + """ + Content-Type: application/http + Content-ID: <sequential int ID> + Content-Transfer-Encoding: <value> (if present) + + <verb> <url> HTTP/<version> + <header name>: <header value> (repeated as necessary) + Content-Length: <value> + (newline if content length > 0) + <body> (if content length > 0) + + Serializes an HTTP request. + + :param ~azure.core.pipeline.transport.HttpRequest sub_request: + Request to serialize. + :return: The serialized sub-request in bytes + """ + + # put the sub-request's headers into a list for efficient str concatenation + sub_request_body = list() + + # get headers for ease of manipulation; remove headers as they are used + headers = sub_request.headers + + # append opening headers + sub_request_body.append("Content-Type: application/http") + sub_request_body.append(_HTTP_LINE_ENDING) + + sub_request_body.append("Content-ID: ") + sub_request_body.append(headers.pop("Content-ID", "")) + sub_request_body.append(_HTTP_LINE_ENDING) + + sub_request_body.append("Content-Transfer-Encoding: binary") + sub_request_body.append(_HTTP_LINE_ENDING) + + # append blank line + sub_request_body.append(_HTTP_LINE_ENDING) + + # append HTTP verb and path and query and HTTP version + sub_request_body.append(sub_request.method) + sub_request_body.append(' ') + sub_request_body.append(sub_request.url) + sub_request_body.append(' ') + sub_request_body.append(_HTTP1_1_IDENTIFIER) + sub_request_body.append(_HTTP_LINE_ENDING) + + # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) + for header_name, header_value in headers.items(): + if header_value is not None: + sub_request_body.append(header_name) + sub_request_body.append(": ") + sub_request_body.append(header_value) + sub_request_body.append(_HTTP_LINE_ENDING) + + # append blank line + sub_request_body.append(_HTTP_LINE_ENDING) + + return ''.join(sub_request_body).encode() diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/response_handlers.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/response_handlers.py index 5df2f5c..aee171a 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/response_handlers.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/response_handlers.py @@ -3,7 +3,6 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- - from typing import ( # pylint: disable=unused-import Union, Optional, Any, Iterable, Dict, List, Type, Tuple, TYPE_CHECKING @@ -80,11 +79,14 @@ def return_headers_and_deserialized(response, deserialized, response_headers): def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument - return response.location_mode, deserialized + return response.http_response.location_mode, deserialized def process_storage_error(storage_error): raise_error = HttpResponseError + # If the status code is 200 or 204 then it has already been deserialized.
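+ # Such errors come from post-processing an otherwise successful call (for example, decoding a received queue message), so they are re-raised unchanged rather than re-mapped.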
+ if storage_error.response.status_code in [200, 204]: + raise storage_error error_code = storage_error.response.headers.get('x-ms-error-code') error_message = storage_error.message additional_data = {} @@ -131,7 +133,6 @@ def process_storage_error(storage_error): StorageErrorCode.share_being_deleted]: raise_error = ResourceExistsError except ValueError: - # Got an unknown error code pass try: diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py index abf3fb2..1b619df 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads.py @@ -77,13 +77,13 @@ def upload_data_chunks( validate_content=validate_content, **kwargs) if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_chunk_streams() - running_futures = [ - executor.submit(with_current_context(uploader.process_chunk), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + executor.submit(with_current_context(uploader.process_chunk), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) else: range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] if any(range_ids): @@ -112,16 +112,18 @@ def upload_substream_blocks( **kwargs) if parallel: - executor = futures.ThreadPoolExecutor(max_concurrency) - upload_tasks = uploader.get_substream_blocks() - running_futures = [ - executor.submit(with_current_context(uploader.process_substream_block), u) - for u in islice(upload_tasks, 0, max_concurrency) - ] - range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + executor.submit(with_current_context(uploader.process_substream_block), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) else: range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] - return sorted(range_ids) + if any(range_ids): + return sorted(range_ids) + return [] class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes @@ -221,16 +223,16 @@ def get_substream_blocks(self): for i in range(blocks): index = i * self.chunk_size length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + yield index, SubStream(self.stream, index, length, lock) def process_substream_block(self, block_data): return self._upload_substream_block_with_progress(block_data[0], block_data[1]) - def _upload_substream_block(self, block_id, block_stream): + def _upload_substream_block(self, index, block_stream): raise NotImplementedError("Must be implemented by child class.") - def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = self._upload_substream_block(block_id, block_stream) + def _upload_substream_block_with_progress(self, index, block_stream): + range_id = 
self._upload_substream_block(index, block_stream) self._update_progress(len(block_stream)) return range_id @@ -260,8 +262,9 @@ def _upload_chunk(self, chunk_offset, chunk_data): ) return index, block_id - def _upload_substream_block(self, block_id, block_stream): + def _upload_substream_block(self, index, block_stream): try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) self.service.stage_block( block_id, len(block_stream), @@ -289,7 +292,7 @@ def _upload_chunk(self, chunk_offset, chunk_data): content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end) computed_md5 = None self.response_headers = self.service.upload_pages( - chunk_data, + body=chunk_data, content_length=len(chunk_data), transactional_content_md5=computed_md5, range=content_range, @@ -302,6 +305,9 @@ def _upload_chunk(self, chunk_offset, chunk_data): if not self.parallel and self.request_options.get('modified_access_conditions'): self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + def _upload_substream_block(self, index, block_stream): + pass + class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method @@ -312,7 +318,7 @@ def __init__(self, *args, **kwargs): def _upload_chunk(self, chunk_offset, chunk_data): if self.current_length is None: self.response_headers = self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, @@ -324,7 +330,7 @@ def _upload_chunk(self, chunk_offset, chunk_data): self.request_options['append_position_access_conditions'].append_position = \ self.current_length + chunk_offset self.response_headers = self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, @@ -332,6 +338,41 @@ def _upload_chunk(self, chunk_offset, chunk_data): **self.request_options ) + def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + self.response_headers = self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): + try: + self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method @@ -348,6 +389,10 @@ def _upload_chunk(self, chunk_offset, chunk_data): ) return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response + # TODO: Implement this method. 
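+ # File shares upload whole ranges via _upload_chunk only; this stub just satisfies the _ChunkUploader interface, whose default raises NotImplementedError.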
+ def _upload_substream_block(self, index, block_stream): + pass + class SubStream(IOBase): @@ -432,6 +477,13 @@ def read(self, size=None): raise IOError("Stream failed to seek to the desired location.") buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) else: + absolute_position = self._stream_begin_index + self._position + # It's possible that there's connection problem during data transfer, + # so when we retry we don't want to read from current position of wrapped stream, + # instead we should seek to where we want to read from. + if self._wrapped_stream.tell() != absolute_position: + self._wrapped_stream.seek(absolute_position, SEEK_SET) + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) if buffer_from_stream: diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads_async.py b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads_async.py index fe68a2b..5ed192b 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads_async.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_shared/uploads_async.py @@ -124,7 +124,9 @@ async def upload_substream_blocks( range_ids = [] for block in uploader.get_substream_blocks(): range_ids.append(await uploader.process_substream_block(block)) - return sorted(range_ids) + if any(range_ids): + return sorted(range_ids) + return class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes @@ -224,16 +226,16 @@ def get_substream_blocks(self): for i in range(blocks): index = i * self.chunk_size length = last_block_size if i == blocks - 1 else self.chunk_size - yield ('BlockId{}'.format("%05d" % i), SubStream(self.stream, index, length, lock)) + yield index, SubStream(self.stream, index, length, lock) async def process_substream_block(self, block_data): return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) - async def _upload_substream_block(self, block_id, block_stream): + async def _upload_substream_block(self, index, block_stream): raise NotImplementedError("Must be implemented by child class.") - async def _upload_substream_block_with_progress(self, block_id, block_stream): - range_id = await self._upload_substream_block(block_id, block_stream) + async def _upload_substream_block_with_progress(self, index, block_stream): + range_id = await self._upload_substream_block(index, block_stream) await self._update_progress(len(block_stream)) return range_id @@ -256,14 +258,15 @@ async def _upload_chunk(self, chunk_offset, chunk_data): await self.service.stage_block( block_id, len(chunk_data), - chunk_data, + body=chunk_data, data_stream_total=self.total_size, upload_stream_current=self.progress_total, **self.request_options) return index, block_id - async def _upload_substream_block(self, block_id, block_stream): + async def _upload_substream_block(self, index, block_stream): try: + block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size)) await self.service.stage_block( block_id, len(block_stream), @@ -293,7 +296,7 @@ async def _upload_chunk(self, chunk_offset, chunk_data): content_range = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) computed_md5 = None self.response_headers = await self.service.upload_pages( - chunk_data, + body=chunk_data, content_length=len(chunk_data), transactional_content_md5=computed_md5, range=content_range, @@ -305,6 +308,9 @@ async def _upload_chunk(self, chunk_offset, chunk_data): if not self.parallel and self.request_options.get('modified_access_conditions'): 
self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + async def _upload_substream_block(self, index, block_stream): + pass + class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method @@ -315,7 +321,7 @@ def __init__(self, *args, **kwargs): async def _upload_chunk(self, chunk_offset, chunk_data): if self.current_length is None: self.response_headers = await self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, @@ -326,25 +332,64 @@ async def _upload_chunk(self, chunk_offset, chunk_data): self.request_options['append_position_access_conditions'].append_position = \ self.current_length + chunk_offset self.response_headers = await self.service.append_block( - chunk_data, + body=chunk_data, content_length=len(chunk_data), cls=return_response_headers, data_stream_total=self.total_size, upload_stream_current=self.progress_total, **self.request_options) + async def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + self.response_headers = await self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + async def _upload_substream_block(self, index, block_stream): + try: + await self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method async def _upload_chunk(self, chunk_offset, chunk_data): - chunk_end = chunk_offset + len(chunk_data) - 1 + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 response = await self.service.upload_range( chunk_data, chunk_offset, - chunk_end, + length, data_stream_total=self.total_size, upload_stream_current=self.progress_total, **self.request_options ) range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_end) return range_id, response + + # TODO: Implement this method. + async def _upload_substream_block(self, index, block_stream): + pass diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/_version.py b/azure/multiapi/storagev2/queue/v2018_03_28/_version.py index e283b15..c3c2d29 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/_version.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/_version.py @@ -9,4 +9,4 @@ # regenerated. 
# -------------------------------------------------------------------------- -VERSION = "12.1.5" +VERSION = "12.1.6" diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/aio/_models.py b/azure/multiapi/storagev2/queue/v2018_03_28/aio/_models.py index 4e4577d..cb90836 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/aio/_models.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/aio/_models.py @@ -8,10 +8,10 @@ from typing import List # pylint: disable=unused-import from azure.core.async_paging import AsyncPageIterator +from azure.core.exceptions import HttpResponseError from .._shared.response_handlers import ( process_storage_error, return_context_and_deserialized) -from .._generated.models import StorageErrorException from .._models import QueueMessage, QueueProperties @@ -36,7 +36,7 @@ def __init__(self, command, results_per_page=None, continuation_token=None): async def _get_next_cb(self, continuation_token): try: return await self._command(number_of_messages=self.results_per_page) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) async def _extract_data_cb(self, messages): @@ -83,7 +83,7 @@ async def _get_next_cb(self, continuation_token): maxresults=self.results_per_page, cls=return_context_and_deserialized, use_location=self.location_mode) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) async def _extract_data_cb(self, get_next_return): diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_client_async.py b/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_client_async.py index b9b611e..34ecba6 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_client_async.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_client_async.py @@ -25,6 +25,7 @@ from urlparse import urlparse # type: ignore from urllib2 import quote, unquote # type: ignore +from azure.core.exceptions import HttpResponseError from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async @@ -38,9 +39,8 @@ return_headers_and_deserialized, ) from .._deserialize import deserialize_queue_properties, deserialize_queue_creation -from .._generated.version import VERSION from .._generated.aio import AzureQueueStorage -from .._generated.models import StorageErrorException, SignedIdentifier +from .._generated.models import SignedIdentifier from .._generated.models import QueueMessage as GenQueueMessage from .._models import QueueMessage, AccessPolicy @@ -111,7 +111,8 @@ def __init__( account_url, queue_name=queue_name, credential=credential, loop=loop, **kwargs ) self._client = AzureQueueStorage(self.url, pipeline=self._pipeline, loop=loop) # type: ignore - self._client._config.version = kwargs.get('api_version', VERSION) # pylint: disable=protected-access + default_api_version = self._client._config.version # pylint: disable=protected-access + self._client._config.version = kwargs.get('api_version', default_api_version) # pylint: disable=protected-access self._loop = loop @distributed_trace_async @@ -149,7 +150,7 @@ async def create_queue(self, **kwargs): return await self._client.queue.create( # type: ignore metadata=metadata, timeout=timeout, headers=headers, cls=deserialize_queue_creation, **kwargs ) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace_async @@ -181,7 +182,7 @@ async def delete_queue(self, **kwargs): 
timeout = kwargs.pop('timeout', None) try: await self._client.queue.delete(timeout=timeout, **kwargs) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace_async @@ -210,7 +211,7 @@ async def get_queue_properties(self, **kwargs): response = await self._client.queue.get_properties( timeout=timeout, cls=deserialize_queue_properties, **kwargs ) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) response.name = self.queue_name return response # type: ignore @@ -245,7 +246,7 @@ async def set_queue_metadata(self, metadata=None, **kwargs): return await self._client.queue.set_metadata( # type: ignore timeout=timeout, headers=headers, cls=return_response_headers, **kwargs ) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace_async @@ -264,7 +265,7 @@ async def get_queue_access_policy(self, **kwargs): _, identifiers = await self._client.queue.get_access_policy( timeout=timeout, cls=return_headers_and_deserialized, **kwargs ) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) return {s.id: s.access_policy or AccessPolicy() for s in identifiers} @@ -317,7 +318,7 @@ async def set_queue_access_policy(self, signed_identifiers, **kwargs): signed_identifiers = identifiers # type: ignore try: await self._client.queue.set_access_policy(queue_acl=signed_identifiers or None, timeout=timeout, **kwargs) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace_async @@ -398,7 +399,7 @@ async def send_message( # type: ignore queue_message.pop_receipt = enqueued[0].pop_receipt queue_message.next_visible_on = enqueued[0].time_next_visible return queue_message - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace_async @@ -454,7 +455,7 @@ async def receive_message(self, **kwargs): wrapped_message = QueueMessage._from_generated( # pylint: disable=protected-access message[0]) if message != [] else None return wrapped_message - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace @@ -517,7 +518,7 @@ def receive_messages(self, **kwargs): **kwargs ) return AsyncItemPaged(command, results_per_page=messages_per_page, page_iterator_class=MessagesPaged) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace_async @@ -620,7 +621,7 @@ async def update_message( new_message.pop_receipt = response["popreceipt"] new_message.next_visible_on = response["time_next_visible"] return new_message - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace_async @@ -677,7 +678,7 @@ async def peek_messages(self, max_messages=None, **kwargs): for peeked in messages: wrapped_messages.append(QueueMessage._from_generated(peeked)) # pylint: disable=protected-access return wrapped_messages - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace_async @@ -700,7 +701,7 @@ async def clear_messages(self, **kwargs): timeout = kwargs.pop('timeout', None) try: await self._client.messages.clear(timeout=timeout, **kwargs) - except StorageErrorException as error: + except HttpResponseError as 
error: process_storage_error(error) @distributed_trace_async @@ -750,5 +751,5 @@ async def delete_message(self, message, pop_receipt=None, **kwargs): await self._client.message_id.delete( pop_receipt=receipt, timeout=timeout, queue_message_id=message_id, **kwargs ) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) diff --git a/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_service_client_async.py b/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_service_client_async.py index 63c11ff..805ed41 100644 --- a/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_service_client_async.py +++ b/azure/multiapi/storagev2/queue/v2018_03_28/aio/_queue_service_client_async.py @@ -14,6 +14,7 @@ except ImportError: from urlparse import urlparse # type: ignore +from azure.core.exceptions import HttpResponseError from azure.core.async_paging import AsyncItemPaged from azure.core.tracing.decorator import distributed_trace from azure.core.pipeline import AsyncPipeline @@ -24,9 +25,8 @@ from .._shared.models import LocationMode from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper from .._shared.response_handlers import process_storage_error -from .._generated.version import VERSION from .._generated.aio import AzureQueueStorage -from .._generated.models import StorageServiceProperties, StorageErrorException +from .._generated.models import StorageServiceProperties from ._models import QueuePropertiesPaged from ._queue_client_async import QueueClient @@ -101,7 +101,8 @@ def __init__( loop=loop, **kwargs) self._client = AzureQueueStorage(url=self.url, pipeline=self._pipeline, loop=loop) # type: ignore - self._client._config.version = kwargs.get('api_version', VERSION) # pylint: disable=protected-access + default_api_version = self._client._config.version # pylint: disable=protected-access + self._client._config.version = kwargs.get('api_version', default_api_version) # pylint: disable=protected-access self._loop = loop @distributed_trace_async @@ -135,7 +136,7 @@ async def get_service_stats(self, **kwargs): stats = await self._client.service.get_statistics( # type: ignore timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) return service_stats_deserialize(stats) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace_async @@ -163,7 +164,7 @@ async def get_service_properties(self, **kwargs): try: service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) # type: ignore return service_properties_deserialize(service_props) - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace_async @@ -219,7 +220,7 @@ async def set_service_properties( # type: ignore ) try: return await self._client.service.set_properties(props, timeout=timeout, **kwargs) # type: ignore - except StorageErrorException as error: + except HttpResponseError as error: process_storage_error(error) @distributed_trace diff --git a/setup.py b/setup.py index 4ce2252..53b5fc4 100644 --- a/setup.py +++ b/setup.py @@ -35,7 +35,7 @@ setup( name='azure-multiapi-storage', - version='0.6.2', + version='0.7.0', description='Microsoft Azure Storage Client Library for Python with multi API version support.', long_description=open('README.rst', 'r').read(), license='MIT',
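Taken together, the queue hunks above change two things a caller can observe: operations now raise ~azure.core.exceptions.HttpResponseError (further mapped to ClientAuthenticationError, ResourceNotFoundError, or ResourceExistsError where the status code allows) in place of the removed StorageErrorException, and the api_version keyword now falls back to the generated client's default version instead of the hardcoded package VERSION. A minimal sketch of the resulting calling pattern, assuming the versioned package re-exports QueueClient the same way azure-storage-queue does (account details are hypothetical)::

    from azure.core.exceptions import HttpResponseError, ResourceExistsError
    from azure.multiapi.storagev2.queue.v2018_03_28 import QueueClient

    # Hypothetical connection string; after the base_client change its keys
    # are matched case-insensitively.
    conn_str = (
        "DefaultEndpointsProtocol=https;AccountName=demo;"
        "AccountKey=<key>;EndpointSuffix=core.windows.net"
    )

    # api_version is optional; per the hunks above it now defaults to the
    # generated client's version rather than the package VERSION constant.
    client = QueueClient.from_connection_string(conn_str, "tasks", api_version="2018-03-28")

    try:
        client.create_queue()
    except ResourceExistsError:
        pass  # HTTP 409 is mapped by the new error_map before reaching here
    except HttpResponseError as error:
        raise  # replaces the removed StorageErrorException

Note that the except order matters: ResourceExistsError subclasses HttpResponseError, so the more specific handler must come first.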