Split uploaded media into separate buckets

We've operated for a while with just one set of s3 buckets for storing static
media (thumbnails, stylesheets, images uploaded for stylesheets).  However, due
to the additional-hop nature of image previews through imgix before reaching
s3, we're finding it'd be convenient to have imgix-served images in a separate
s3 bucket.

The media providers now take a *category* on `put()`.  We have a mapping from
category to bucket names, which will (at least for the time being) remain
`s3_media_buckets` for everything other than image previews.
This commit is contained in:
xiongchiamiov
2015-03-26 14:44:30 -07:00
parent e6a5b039f0
commit b40da70feb
4 changed files with 10 additions and 9 deletions

View File

@@ -266,6 +266,7 @@ S3KEY_ID =
S3SECRET_KEY =
# May be one bucket, or many buckets separated by commas
s3_media_buckets =
s3_image_buckets =
# Store direct urls for images, rather than buckets
# For the bucket mybucket with the image helloworld.jpg the stored url would be:
# true: http://{s3_media_domain}/mybucket/helloworld.jpg

View File

@@ -227,7 +227,7 @@ def _filename_from_content(contents):
return base64.urlsafe_b64encode(hash_bytes).rstrip("=")
def upload_media(image, file_type='.jpg'):
def upload_media(image, file_type='.jpg', category='thumbs'):
"""Upload an image to the media provider."""
f = tempfile.NamedTemporaryFile(suffix=file_type, delete=False)
try:
@@ -258,7 +258,7 @@ def upload_media(image, file_type='.jpg'):
optimize_jpeg(f.name)
contents = open(f.name).read()
file_name = _filename_from_content(contents) + file_type
return g.media_provider.put(file_name, contents)
return g.media_provider.put(category, file_name, contents)
finally:
os.unlink(f.name)
return ""
@@ -266,7 +266,7 @@ def upload_media(image, file_type='.jpg'):
def upload_stylesheet(content):
file_name = _filename_from_content(content) + ".css"
return g.media_provider.put(file_name, content)
return g.media_provider.put('stylesheets', file_name, content)
def _scrape_media(url, autoplay=False, maxwidth=600, force=False,
@@ -402,7 +402,7 @@ def upload_icon(image_data, size):
image.thumbnail(size, Image.ANTIALIAS)
icon_data = _image_to_str(image)
file_name = _filename_from_content(icon_data)
return g.media_provider.put(file_name + ".png", icon_data)
return g.media_provider.put('icons', file_name + ".png", icon_data)
def _make_custom_media_embed(media_object):
@@ -515,7 +515,7 @@ class _ThumbnailOnlyScraper(Scraper):
uid = _filename_from_content(image_data)
image = str_to_image(image_data)
storage_url = upload_media(image)
storage_url = upload_media(image, category='previews')
width, height = image.size
preview_object = {
'uid': uid,
@@ -657,7 +657,7 @@ class _EmbedlyScraper(Scraper):
content_type, content = _fetch_url(thumbnail_url, referer=self.url)
uid = _filename_from_content(content)
image = str_to_image(content)
storage_url = upload_media(image)
storage_url = upload_media(image, category='previews')
width, height = image.size
preview_object = {
'uid': uid,

View File

@@ -28,7 +28,6 @@ class MediaProvider(object):
users to be able to view those objects over HTTP.
"""
def make_inaccessible(self, url):
"""Make the content unavailable, but do not remove. Content could
be recovered at a later time.

View File

@@ -64,6 +64,7 @@ class S3MediaProvider(MediaProvider):
],
ConfigValue.tuple: [
"s3_media_buckets",
"s3_image_buckets",
],
}
@@ -114,8 +115,8 @@ class S3MediaProvider(MediaProvider):
buckets = getattr(g, self.buckets[category])
# choose a bucket based on the filename
name_without_extension = os.path.splitext(name)[0]
index = ord(name_without_extension[-1]) % len(g.s3_media_buckets)
bucket_name = g.s3_media_buckets[index]
index = ord(name_without_extension[-1]) % len(buckets)
bucket_name = buckets[index]
# guess the mime type
mime_type, encoding = mimetypes.guess_type(name)