from __future__ import unicode_literals

import os
import base64
import datetime
import hashlib
import copy
import itertools
import codecs

import six

from moto.core import BaseBackend
from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime

from .exceptions import BucketAlreadyExists, MissingBucket
from .utils import clean_key_name, _VersionedKeyStore

UPLOAD_ID_BYTES = 43
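# S3 itself rejects CompleteMultipartUpload when any part other than the last
# is smaller than 5 MB; 5242880 bytes is that documented floor.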
UPLOAD_PART_MIN_SIZE = 5242880


class FakeKey(object):
    def __init__(self, name, value, storage="STANDARD", etag=None, is_versioned=False, version_id=0):
        self.name = name
        self.value = value
        self.last_modified = datetime.datetime.utcnow()
        self._storage_class = storage
        self._metadata = {}
        self._expiry = None
        self._etag = etag
        self._version_id = version_id
        self._is_versioned = is_versioned

    def copy(self, new_name=None):
        r = copy.deepcopy(self)
        if new_name is not None:
            r.name = new_name
        return r

    def set_metadata(self, metadata, replace=False):
        if replace:
            self._metadata = {}
        self._metadata.update(metadata)

    def set_storage_class(self, storage_class):
        self._storage_class = storage_class

    def append_to_value(self, value):
        self.value += value
        self.last_modified = datetime.datetime.utcnow()
        self._etag = None  # must recalculate etag
        if self._is_versioned:
            self._version_id += 1
        else:
            self._version_id = 0

    def restore(self, days):
        self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days)

    @property
    def etag(self):
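        # Mirrors S3's behaviour for simple PUTs: the ETag is the quoted hex
        # MD5 of the body, computed lazily and cached until the value changes.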
        if self._etag is None:
            value_md5 = hashlib.md5()
            if isinstance(self.value, six.text_type):
                value = self.value.encode("utf-8")
            else:
                value = self.value
            value_md5.update(value)
            self._etag = value_md5.hexdigest()
        return '"{0}"'.format(self._etag)

    @property
    def last_modified_ISO8601(self):
        return iso_8601_datetime_with_milliseconds(self.last_modified)

    @property
    def last_modified_RFC1123(self):
        # Different datetime formats depending on how the key is obtained
        # https://github.com/boto/boto/issues/466
        return rfc_1123_datetime(self.last_modified)

    @property
    def metadata(self):
        return self._metadata

    @property
    def response_dict(self):
        r = {
            'etag': self.etag,
            'last-modified': self.last_modified_RFC1123,
        }
        if self._storage_class != 'STANDARD':
            r['x-amz-storage-class'] = self._storage_class
        if self._expiry is not None:
            rhdr = 'ongoing-request="false", expiry-date="{0}"'
            r['x-amz-restore'] = rhdr.format(self.expiry_date)

        if self._is_versioned:
            r['x-amz-version-id'] = self._version_id

        return r

    @property
    def size(self):
        return len(self.value)

    @property
    def storage_class(self):
        return self._storage_class

    @property
    def expiry_date(self):
        if self._expiry is not None:
            return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT")


class FakeMultipart(object):
    def __init__(self, key_name, metadata):
        self.key_name = key_name
        self.metadata = metadata
        self.parts = {}
        rand_b64 = base64.b64encode(os.urandom(UPLOAD_ID_BYTES))
        self.id = rand_b64.decode('utf-8').replace('=', '').replace('+', '')

    def complete(self):
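        # Assembles the full object and its multipart ETag the way S3 does:
        # MD5 over the concatenated binary MD5s of each part, suffixed with
        # "-<part count>".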
        decode_hex = codecs.getdecoder("hex_codec")
        total = bytearray()
        md5s = bytearray()
        parts = self.list_parts()
        if parts is None:
            # Non-contiguous part numbers: the upload cannot be completed.
            return None, None
        last_part_name = len(parts)

        for part in parts:
            if part.name != last_part_name and len(part.value) < UPLOAD_PART_MIN_SIZE:
                return None, None
            part_etag = part.etag.replace('"', '')
            md5s.extend(decode_hex(part_etag)[0])
            total.extend(part.value)

        etag = hashlib.md5()
        etag.update(bytes(md5s))
        return total, "{0}-{1}".format(etag.hexdigest(), last_part_name)

    def set_part(self, part_id, value):
        if part_id < 1:
            return

        key = FakeKey(part_id, value)
        self.parts[part_id] = key
        return key

    def list_parts(self):
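        # Parts must be numbered 1..N with no gaps; if a part number was
        # skipped, the listing is refused and None is returned.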
        parts = []

        for expected_id, part_id in enumerate(sorted(self.parts.keys()), start=1):
            if part_id != expected_id:
                return
            parts.append(self.parts[part_id])

        return parts


class FakeBucket(object):
    def __init__(self, name, region_name):
        self.name = name
        self.region_name = region_name
        self.keys = _VersionedKeyStore()
        self.multiparts = {}
        self.versioning_status = None

    @property
    def location(self):
        return self.region_name

    @property
    def is_versioned(self):
        return self.versioning_status == 'Enabled'

    def get_cfn_attribute(self, attribute_name):
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == 'DomainName':
            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "DomainName" ]"')
        elif attribute_name == 'WebsiteURL':
            raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "WebsiteURL" ]"')
        raise UnformattedGetAttTemplateException()


class S3Backend(BaseBackend):
    def __init__(self):
        self.buckets = {}

    def create_bucket(self, bucket_name, region_name):
        if bucket_name in self.buckets:
            raise BucketAlreadyExists()
        new_bucket = FakeBucket(name=bucket_name, region_name=region_name)
        self.buckets[bucket_name] = new_bucket
        return new_bucket

    def get_all_buckets(self):
        return self.buckets.values()

    def get_bucket(self, bucket_name):
        try:
            return self.buckets[bucket_name]
        except KeyError:
            raise MissingBucket()

    def delete_bucket(self, bucket_name):
        bucket = self.get_bucket(bucket_name)
        if bucket.keys:
            # Can't delete a bucket with keys
            return False
        else:
            return self.buckets.pop(bucket_name)

    def set_bucket_versioning(self, bucket_name, status):
        self.get_bucket(bucket_name).versioning_status = status

    def get_bucket_versioning(self, bucket_name):
        return self.get_bucket(bucket_name).versioning_status

    def get_bucket_versions(self, bucket_name, delimiter=None,
                            encoding_type=None,
                            key_marker=None,
                            max_keys=None,
                            version_id_marker=None):
        bucket = self.get_bucket(bucket_name)

        if any((delimiter, encoding_type, key_marker, version_id_marker)):
            raise NotImplementedError(
                "Called get_bucket_versions with some of delimiter, encoding_type, key_marker, version_id_marker")

        # Flatten every key's version list in the store into a single iterable.
        return itertools.chain(*(l for _, l in bucket.keys.iterlists()))

    def set_key(self, bucket_name, key_name, value, storage=None, etag=None):
        key_name = clean_key_name(key_name)

        bucket = self.get_bucket(bucket_name)
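        # On a versioned bucket an overwrite gets the next integer version id;
        # otherwise every write is version 0.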
        old_key = bucket.keys.get(key_name, None)
        if old_key is not None and bucket.is_versioned:
            new_version_id = old_key._version_id + 1
        else:
            new_version_id = 0

        new_key = FakeKey(
            name=key_name,
            value=value,
            storage=storage,
            etag=etag,
            is_versioned=bucket.is_versioned,
            version_id=new_version_id)
        bucket.keys[key_name] = new_key

        return new_key

    def append_to_key(self, bucket_name, key_name, value):
        key_name = clean_key_name(key_name)

        key = self.get_key(bucket_name, key_name)
        key.append_to_value(value)
        return key

    def get_key(self, bucket_name, key_name, version_id=None):
        key_name = clean_key_name(key_name)
        bucket = self.get_bucket(bucket_name)
        if bucket:
            if version_id is None:
                return bucket.keys.get(key_name)
            else:
                for key in bucket.keys.getlist(key_name):
                    if str(key._version_id) == str(version_id):
                        return key

    def initiate_multipart(self, bucket_name, key_name, metadata):
        bucket = self.get_bucket(bucket_name)
        new_multipart = FakeMultipart(key_name, metadata)
        bucket.multiparts[new_multipart.id] = new_multipart

        return new_multipart

    def complete_multipart(self, bucket_name, multipart_id):
        bucket = self.get_bucket(bucket_name)
        multipart = bucket.multiparts[multipart_id]
        value, etag = multipart.complete()
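        # complete() yields (None, None) for an invalid upload (an undersized
        # non-final part or non-contiguous part numbers); keep it pending then.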
        if value is None:
            return
        del bucket.multiparts[multipart_id]

        key = self.set_key(bucket_name, multipart.key_name, value, etag=etag)
        key.set_metadata(multipart.metadata)
        return key

    def cancel_multipart(self, bucket_name, multipart_id):
        bucket = self.get_bucket(bucket_name)
        del bucket.multiparts[multipart_id]

    def list_multipart(self, bucket_name, multipart_id):
        bucket = self.get_bucket(bucket_name)
        return bucket.multiparts[multipart_id].list_parts()

    def get_all_multiparts(self, bucket_name):
        bucket = self.get_bucket(bucket_name)
        return bucket.multiparts

    def set_part(self, bucket_name, multipart_id, part_id, value):
        bucket = self.get_bucket(bucket_name)
        multipart = bucket.multiparts[multipart_id]
        return multipart.set_part(part_id, value)

    def copy_part(self, dest_bucket_name, multipart_id, part_id,
                  src_bucket_name, src_key_name):
        src_key_name = clean_key_name(src_key_name)
        src_bucket = self.get_bucket(src_bucket_name)
        dest_bucket = self.get_bucket(dest_bucket_name)
        multipart = dest_bucket.multiparts[multipart_id]
        return multipart.set_part(part_id, src_bucket.keys[src_key_name].value)

    def prefix_query(self, bucket, prefix, delimiter):
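        # Mimics ListObjects: keys under `prefix` that contain `delimiter`
        # after the prefix collapse into CommonPrefixes-style folder entries;
        # everything else is returned as a plain key.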
        key_results = set()
        folder_results = set()
        if prefix:
            for key_name, key in bucket.keys.items():
                if key_name.startswith(prefix):
                    key_without_prefix = key_name.replace(prefix, "", 1)
                    if delimiter and delimiter in key_without_prefix:
                        # If delimiter, we need to split out folder_results
                        key_without_delimiter = key_without_prefix.split(delimiter)[0]
                        folder_results.add("{0}{1}{2}".format(prefix, key_without_delimiter, delimiter))
                    else:
                        key_results.add(key)
        else:
            for key_name, key in bucket.keys.items():
                if delimiter and delimiter in key_name:
                    # If delimiter, we need to split out folder_results
                    folder_results.add(key_name.split(delimiter)[0] + delimiter)
                else:
                    key_results.add(key)

        key_results = sorted(key_results, key=lambda key: key.name)
        folder_results = sorted(folder_results)

        return key_results, folder_results

    def delete_key(self, bucket_name, key_name):
        key_name = clean_key_name(key_name)
        bucket = self.get_bucket(bucket_name)
        return bucket.keys.pop(key_name)

    def copy_key(self, src_bucket_name, src_key_name, dest_bucket_name, dest_key_name, storage=None):
        src_key_name = clean_key_name(src_key_name)
        dest_key_name = clean_key_name(dest_key_name)
        src_bucket = self.get_bucket(src_bucket_name)
        dest_bucket = self.get_bucket(dest_bucket_name)
        key = src_bucket.keys[src_key_name]
        if dest_key_name != src_key_name:
            key = key.copy(dest_key_name)
        dest_bucket.keys[dest_key_name] = key
        if storage is not None:
            dest_bucket.keys[dest_key_name].set_storage_class(storage)


s3_backend = S3Backend()
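# A minimal usage sketch (hypothetical bucket and key names; everything else
# is defined above):
#
#     backend = S3Backend()
#     backend.create_bucket('my-bucket', 'us-east-1')
#     backend.set_key('my-bucket', 'hello.txt', 'hello world')
#     key = backend.get_key('my-bucket', 'hello.txt')
#     assert key.size == 11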