diff --git a/r2/example.ini b/r2/example.ini
index 32da8eb4d..0e3b44aff 100644
--- a/r2/example.ini
+++ b/r2/example.ini
@@ -52,12 +52,17 @@ default_sr = localhost
admins =
page_cache_time = 30
static_path = /static/
+useragent = Mozilla/5.0 (compatible; bot/1.0; ChangeMe)
solr_url =
SECRET = abcdefghijklmnopqrstuvwxyz0123456789
MODSECRET = abcdefghijklmnopqrstuvwxyz0123456789
ip_hash =
+S3KEY_ID = ABCDEFGHIJKLMNOP1234
+S3SECRET_KEY = aBcDeFgHiJkLmNoPqRsTuVwXyZ1234567890AbCd
+s3_thumb_bucket = /your.bucket.here/
+default_thumb = /static/noimage.png
MIN_DOWN_LINK = 0
MIN_UP_KARMA = 0
@@ -69,6 +74,10 @@ MIN_RATE_LIMIT_COMMENT_KARMA = 0
MODWINDOW = 2
HOT_PAGE_AGE = 1
+# how far back to scrape media and to count links as rising
+media_period = 10 minutes
+rising_period = 12 hours
+
# time of ratelimit purgatory (min)
RATELIMIT = 10
diff --git a/r2/r2/controllers/api.py b/r2/r2/controllers/api.py
index 14bc3ae39..8b6b2a543 100644
--- a/r2/r2/controllers/api.py
+++ b/r2/r2/controllers/api.py
@@ -657,6 +657,7 @@ class ApiController(RedditController):
ad_file = nop("ad_file"),
sr = VByName('sr'),
over_18 = VBoolean('over_18'),
+ show_media = VBoolean('show_media'),
type = VOneOf('type', ('public', 'private', 'restricted'))
)
def POST_site_admin(self, res, name ='', sr = None, **kw):
@@ -665,7 +666,7 @@ class ApiController(RedditController):
kw = dict((k, v) for k, v in kw.iteritems()
if v is not None
and k in ('name', 'title', 'description', 'firsttext',
- 'static_path', 'ad_file', 'over_18',
+ 'static_path', 'ad_file', 'over_18', 'show_media',
'type', 'header', 'lang', 'stylesheet'))
#if a user is banned, return rate-limit errors
diff --git a/r2/r2/controllers/listingcontroller.py b/r2/r2/controllers/listingcontroller.py
index a4899d085..98b7caa97 100644
--- a/r2/r2/controllers/listingcontroller.py
+++ b/r2/r2/controllers/listingcontroller.py
@@ -177,7 +177,8 @@ class HotController(FixListing, ListingController):
o_links, pos = organic.organic_links(c.user)
if o_links:
# get links in proximity to pos
- disp_links = [o_links[(i + pos) % len(o_links)] for i in xrange(-2, 8)]
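+ #cap the window size so the modular index can't wrap around and repeat links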
+ l = min(len(o_links) - 3, 8)
+ disp_links = [o_links[(i + pos) % len(o_links)] for i in xrange(-2, l)]
b = IDBuilder(disp_links,
wrap = self.builder_wrapper)
diff --git a/r2/r2/controllers/post.py b/r2/r2/controllers/post.py
index 90a44835a..9ef5d8b02 100644
--- a/r2/r2/controllers/post.py
+++ b/r2/r2/controllers/post.py
@@ -59,6 +59,7 @@ class PostController(ApiController):
pref_over_18 = VBoolean('over_18'),
pref_numsites = VInt('numsites', 1, 100),
pref_lang = VLang('lang'),
+ pref_media = VOneOf('media', ('on', 'off', 'subreddit')),
pref_compress = VBoolean('compress'),
pref_min_link_score = VInt('min_link_score', -100, 100),
pref_min_comment_score = VInt('min_comment_score', -100, 100),
diff --git a/r2/r2/lib/app_globals.py b/r2/r2/lib/app_globals.py
index a8957652e..227850852 100644
--- a/r2/r2/lib/app_globals.py
+++ b/r2/r2/lib/app_globals.py
@@ -21,7 +21,7 @@
################################################################################
from __future__ import with_statement
from pylons import config
-import pytz, os
+import pytz, os, logging, sys
from datetime import timedelta
from r2.lib.cache import LocalCache, Memcache, CacheChain
from r2.lib.db.stats import QueryStats
@@ -146,9 +146,16 @@ class Globals(object):
full_name = os.path.join(log_path, fname)
os.remove(full_name)
+ #set up the logger
+ self.log = logging.getLogger('reddit')
+ if self.debug:
+ self.log.setLevel(logging.DEBUG)
+ self.log.addHandler(logging.StreamHandler())
+
def __del__(self):
"""
Put any cleanup code to be run when the application finally exits
here.
"""
pass
+
diff --git a/r2/r2/lib/contrib/memcache.py b/r2/r2/lib/contrib/memcache.py
index a25c22e9f..6a8524272 100644
--- a/r2/r2/lib/contrib/memcache.py
+++ b/r2/r2/lib/contrib/memcache.py
@@ -525,6 +525,8 @@ class Client(local):
try:
for key in server_keys[server]: # These are mangled keys
store_info = self._val_to_store_info(mapping[prefixed_to_orig_key[key]], min_compress_len)
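+ #_val_to_store_info returns a false value when the item can't be stored (e.g. it is too large); skip it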
+ if not store_info:
+ continue
write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0], time, store_info[1], store_info[2]))
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
@@ -568,10 +570,11 @@ class Client(local):
min_compress_len = 0
else:
flags |= Client._FLAG_PICKLE
- val = pickle.dumps(val, 0) # Ack! JLR hacks it so that LinkedDict unpicling works w/o figuring out __reduce__.
+ val = pickle.dumps(val, -1) # Ack! JLR hacks it so that LinkedDict unpickling works w/o figuring out __reduce__.
# silently do not store if value length exceeds maximum
- if len(val) >= SERVER_MAX_VALUE_LENGTH: return(0)
+ if len(val) >= SERVER_MAX_VALUE_LENGTH:
+ return 0
lv = len(val)
# We should try to compress if min_compress_len > 0 and we could import zlib and this string is longer than our min threshold.
@@ -593,6 +596,8 @@ class Client(local):
self._statlog(cmd)
store_info = self._val_to_store_info(val, min_compress_len)
+ if not store_info:
+ return 0
fullcmd = "%s %s %d %d %d\r\n%s" % (cmd, key, store_info[0], time, store_info[1], store_info[2])
try:
diff --git a/r2/r2/lib/count.py b/r2/r2/lib/count.py
index 220ed6ed9..e9bca586b 100644
--- a/r2/r2/lib/count.py
+++ b/r2/r2/lib/count.py
@@ -22,18 +22,21 @@
from r2.models import Link, Subreddit
from r2.lib import utils
+from pylons import g
+
+count_period = g.rising_period
#stubs
def incr_counts(wrapped):
pass
-def get_link_counts(period = '12 hours'):
+def get_link_counts(period = count_period):
links = Link._query(Link.c._date >= utils.timeago(period),
limit=50, data = True)
return dict((l._fullname, (0, l.sr_id)) for l in links)
-def get_sr_counts(period = '12 hours'):
+def get_sr_counts(period = count_period):
srs = Subreddit._query()
return dict((l._fullname, (0, l.sr_id)) for l in links)
diff --git a/r2/r2/lib/media.py b/r2/r2/lib/media.py
new file mode 100644
index 000000000..ce4e072ef
--- /dev/null
+++ b/r2/r2/lib/media.py
@@ -0,0 +1,112 @@
+# The contents of this file are subject to the Common Public Attribution
+# License Version 1.0. (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
+# License Version 1.1, but Sections 14 and 15 have been added to cover use of
+# software over a computer network and provide for limited attribution for the
+# Original Developer. In addition, Exhibit A has been modified to be consistent
+# with Exhibit B.
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
+# the specific language governing rights and limitations under the License.
+#
+# The Original Code is Reddit.
+#
+# The Original Developer is the Initial Developer. The Initial Developer of the
+# Original Code is CondeNet, Inc.
+#
+# All portions of the code written by CondeNet are Copyright (c) 2006-2008
+# CondeNet, Inc. All Rights Reserved.
+################################################################################
+
+from pylons import g, config
+
+from r2.models.link import Link
+from r2.lib.workqueue import WorkQueue
+from r2.lib import s3cp
+from r2.lib.utils import timeago, fetch_things2
+from r2.lib.db.operators import desc
+from r2.lib.scraper import make_scraper
+
+import tempfile
+from Queue import Queue
+
+s3_thumbnail_bucket = g.s3_thumb_bucket
+media_period = g.media_period
+threads = 20
+log = g.log
+
+def thumbnail_url(link):
+ """Given a link, returns the url for its thumbnail based on its fullname"""
+ return 'http:/%s%s.png' % (s3_thumbnail_bucket, link._fullname)
+
+def upload_thumb(link, image):
+ """Given a link and an image, uploads the image to s3 into an image
+ based on the link's fullname"""
+ f = tempfile.NamedTemporaryFile(suffix = '.png')
+ image.save(f)
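+ #the temp file is removed automatically once f is garbage-collected after the upload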
+
+ resource = s3_thumbnail_bucket + link._fullname + '.png'
+ log.debug('uploading to s3: %s' % link._fullname)
+ s3cp.send_file(f.name, resource, 'image/png', 'public-read', None, False)
+ log.debug('thumbnail %s: %s' % (link._fullname, thumbnail_url(link)))
+
+def make_link_info_job(results, link, useragent):
+ """Returns a unit of work to send to a work queue that downloads a
+ link's thumbnail and media object. Places the result in the results
+ dict"""
+ def job():
+ scraper = make_scraper(link.url)
+
+ thumbnail = scraper.thumbnail()
+ media_object = scraper.media_object()
+
+ if thumbnail:
+ upload_thumb(link, thumbnail)
+
+ results[link] = (thumbnail, media_object)
+ return job
+
+def update_link(link, thumbnail, media_object):
+ """Sets the link's has_thumbnail and media_object attributes iin the
+ database."""
+ if thumbnail:
+ link.has_thumbnail = True
+
+ if media_object:
+ link.media_object = media_object
+
+ link._commit()
+
+def process_new_links(period = media_period, force = False):
+ """Fetches links from the last period and sets their media
+ properties. If force is True, it will fetch properties for links
+ even if the properties already exist"""
+ links = Link._query(Link.c._date > timeago(period), sort = desc('_date'),
+ data = True)
+ results = {}
+ jobs = []
+ for link in fetch_things2(links):
+ if link.is_self:
+ continue
+
+ if not force and (link.has_thumbnail or link.media_object):
+ continue
+
+ jobs.append(make_link_info_job(results, link, g.useragent))
+
+ #send links to a queue
+ wq = WorkQueue(jobs, num_workers = threads)
+ wq.start()
+ wq.jobs.join()
+
+ #when the queue is finished, do the db writes in this thread
+ for link, info in results.items():
+ update_link(link, info[0], info[1])
+
+def set_media(link):
+ """Sets the media properties for a single link."""
+ results = {}
+ make_link_info_job(results, link, g.useragent)()
+ update_link(link, *results[link])
diff --git a/r2/r2/lib/s3cp.py b/r2/r2/lib/s3cp.py
new file mode 100644
index 000000000..652734e35
--- /dev/null
+++ b/r2/r2/lib/s3cp.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+
+# The contents of this file are subject to the Common Public Attribution
+# License Version 1.0. (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
+# License Version 1.1, but Sections 14 and 15 have been added to cover use of
+# software over a computer network and provide for limited attribution for the
+# Original Developer. In addition, Exhibit A has been modified to be consistent
+# with Exhibit B.
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
+# the specific language governing rights and limitations under the License.
+#
+# The Original Code is Reddit.
+#
+# The Original Developer is the Initial Developer. The Initial Developer of the
+# Original Code is CondeNet, Inc.
+#
+# All portions of the code written by CondeNet are Copyright (c) 2006-2008
+# CondeNet, Inc. All Rights Reserved.
+################################################################################
+
+import base64, hmac, sha, os, sys, getopt
+from datetime import datetime
+from pylons import g,config
+
+KEY_ID = g.S3KEY_ID
+SECRET_KEY = g.S3SECRET_KEY
+
+class S3Exception(Exception): pass
+
+def make_header(verb, date, amz_headers, resource, content_type):
+ content_md5 = ''
+
+ #amazon headers
+ lower_head = dict((key.lower(), val)
+ for key, val in amz_headers.iteritems())
+ keys = lower_head.keys()
+ keys.sort()
+ amz_lst = ['%s:%s' % (key, lower_head[key]) for key in keys]
+ amz_str = '\n'.join(amz_lst)
+
+ s = '\n'.join((verb,
+ content_md5,
+ content_type,
+ date,
+ amz_str,
+ resource))
+
+ h = hmac.new(SECRET_KEY, s, sha)
+ return base64.encodestring(h.digest()).strip()
+
+def send_file(filename, resource, content_type, acl, rate, meter):
+ date = datetime.utcnow().strftime("%a, %d %b %Y %X GMT")
+ amz_headers = {'x-amz-acl': acl}
+
+ auth_header = make_header('PUT', date, amz_headers, resource, content_type)
+
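+ #the upload is shelled out to curl: -T streams the file as an HTTP PUT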
+ params = ['-T', filename,
+ '-H', 'x-amz-acl: %s' % amz_headers['x-amz-acl'],
+ '-H', 'Authorization: AWS %s:%s' % (KEY_ID, auth_header),
+ '-H', 'Date: %s' % date]
+
+ if content_type:
+ params.append('-H')
+ params.append('Content-Type: %s' % content_type)
+
+ if rate:
+ params.append('--limit-rate')
+ params.append(rate)
+
+ if meter:
+ params.append('-o')
+ params.append('s3cp.output')
+ else:
+ params.append('-s')
+
+ params.append('https://s3.amazonaws.com%s' % resource)
+
+ exit_code = os.spawnlp(os.P_WAIT, 'curl', 'curl', *params)
+ if exit_code:
+ raise S3Exception(exit_code)
+
+
+if __name__ == '__main__':
+ options = "a:c:l:m"
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], options)
+ except:
+ sys.exit(2)
+
+ opts = dict(opts)
+
+ send_file(args[0], args[1],
+ opts.get('-c', ''),
+ opts.get('-a', 'private'),
+ opts.get('-l'),
+ opts.has_key('-m'))
diff --git a/r2/r2/lib/scraper.py b/r2/r2/lib/scraper.py
new file mode 100644
index 000000000..5ee4751f1
--- /dev/null
+++ b/r2/r2/lib/scraper.py
@@ -0,0 +1,265 @@
+# The contents of this file are subject to the Common Public Attribution
+# License Version 1.0. (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
+# License Version 1.1, but Sections 14 and 15 have been added to cover use of
+# software over a computer network and provide for limited attribution for the
+# Original Developer. In addition, Exhibit A has been modified to be consistent
+# with Exhibit B.
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
+# the specific language governing rights and limitations under the License.
+#
+# The Original Code is Reddit.
+#
+# The Original Developer is the Initial Developer. The Initial Developer of the
+# Original Code is CondeNet, Inc.
+#
+# All portions of the code written by CondeNet are Copyright (c) 2006-2008
+# CondeNet, Inc. All Rights Reserved.
+################################################################################
+
+from pylons import g
+from r2.lib import utils
+from r2.lib.memoize import memoize
+
+from urllib2 import Request, HTTPError, URLError, urlopen
+import urlparse, re, urllib, logging, StringIO
+import Image, ImageFile
+
+log = g.log
+useragent = g.useragent
+
+chunk_size = 1024
+thumbnail_size = 70, 70
+
+def image_to_str(image):
+ s = StringIO.StringIO()
+ image.save(s, image.format)
+ s.seek(0)
+ return s.read()
+
+def str_to_image(s):
+ s = StringIO.StringIO(s)
+ s.seek(0)
+ image = Image.open(s)
+ return image
+
+@memoize('media.fetch_url')
+def fetch_url(url, referer = None, retries = 1, dimension = False):
+ cur_try = 0
+ #log.debug('fetching: %s' % url)
+ nothing = None if dimension else (None, None)
+ while True:
+ try:
+ req = Request(url)
+ if useragent:
+ req.add_header('User-Agent', useragent)
+ if referer:
+ req.add_header('Referer', referer)
+
+ open_req = urlopen(req)
+
+ #if we only need the dimension of the image, we may not
+ #need the entire image
+ if dimension:
+ content = open_req.read(chunk_size)
+ else:
+ content = open_req.read()
+ content_type = open_req.headers.get('content-type')
+
+ if 'image' in content_type:
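+ #feed the parser incrementally; p.image appears as soon as the header (and so the size) is parsed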
+ p = ImageFile.Parser()
+ new_data = content
+ while not p.image and new_data:
+ p.feed(new_data)
+ new_data = open_req.read(chunk_size)
+ content += new_data
+
+ #return the size, or return the data
+ if dimension and p.image:
+ return p.image.size
+ elif dimension:
+ return nothing
+ elif dimension:
+ #expected an image, but didn't get one
+ return nothing
+
+ return content_type, content
+
+ except (URLError, HTTPError), e:
+ cur_try += 1
+ if cur_try >= retries:
+ log.debug('error while fetching: %s referer: %s' % (url, referer))
+ log.debug(e)
+ return nothing
+ finally:
+ if 'open_req' in locals():
+ open_req.close()
+
+img_rx = re.compile(r'<\s*(?:img)[^>]*src\s*=\s*[\"\']?([^\"\'\s>]*)[^>]*', re.IGNORECASE | re.S)
+def image_urls(base_url, html):
+ for match in img_rx.findall(html):
+ image_url = urlparse.urljoin(base_url, match)
+ yield image_url
+
+class Scraper:
+ def __init__(self, url):
+ self.url = url
+ self.content = None
+ self.content_type = None
+
+ def download(self):
+ self.content_type, self.content = fetch_url(self.url)
+
+ def largest_image_url(self):
+ if not self.content:
+ self.download()
+
+ #if download didn't work
+ if not self.content:
+ return None
+
+ max_area = 0
+ max_url = None
+
+ #if the original url was an image, use that
+ if 'image' in self.content_type:
+ urls = [self.url]
+ else:
+ urls = image_urls(self.url, self.content)
+
+ for image_url in urls:
+ size = fetch_url(image_url, referer = self.url, dimension = True)
+ if not size:
+ continue
+
+ area = size[0] * size[1]
+
+ #ignore little images
+ if area < 5000:
+ log.debug('ignore little %s' % image_url)
+ continue
+
+ #ignore excessively long/wide images
+ if float(max(size)) / min(size) > 1.5:
+ log.debug('ignore dimensions %s' % image_url)
+ continue
+
+ if area > max_area:
+ max_area = area
+ max_url = image_url
+
+ return max_url
+
+ def thumbnail(self):
+ image_url = self.largest_image_url()
+ if image_url:
+ content_type, image_str = fetch_url(image_url, referer = self.url)
+ if image_str:
+ image = str_to_image(image_str)
+ image.thumbnail(thumbnail_size, Image.ANTIALIAS)
+ return image
+
+ def media_object(self):
+ return None
+
+youtube_rx = re.compile('.*v=([A-Za-z0-9-_]+).*')
+
+class YoutubeScraper(Scraper):
+ #assumed: the stock 2008 YouTube embed markup; both %s slots take the video id
+ media_template = '<object width="425" height="350"><param name="movie" value="http://www.youtube.com/v/%s"></param><embed src="http://www.youtube.com/v/%s" type="application/x-shockwave-flash" width="425" height="350"></embed></object>'
+
+ def __init__(self, url):
+ m = youtube_rx.match(url)
+ if m:
+ self.video_id = m.groups()[0]
+ else:
+ #if it's not a youtube video, just treat it like a normal page
+ log.debug('reverting youtube to regular scraper: %s' % url)
+ self.__class__ = Scraper
+
+ Scraper.__init__(self, url)
+
+ def largest_image_url(self):
+ return 'http://img.youtube.com/vi/%s/default.jpg' % self.video_id
+
+ def media_object(self):
+ return self.media_template % (self.video_id, self.video_id)
+
+gootube_rx = re.compile('.*videoplay\?docid=([A-Za-z0-9-_]+).*')
+gootube_thumb_rx = re.compile(".*thumbnail:\s*\'(http://[^/]+/ThumbnailServer2[^\']+)\'.*", re.IGNORECASE | re.S)
+
+class GootubeScraper(Scraper):
+ #assumed: the Google Video player embed; the %s slot takes the doc id
+ media_template = '<embed style="width:400px; height:326px;" id="VideoPlayback" type="application/x-shockwave-flash" src="http://video.google.com/googleplayer.swf?docId=%s" flashvars=""></embed>'
+ def __init__(self, url):
+ m = gootube_rx.match(url)
+ if m:
+ self.video_id = m.groups()[0]
+ else:
+ self.__class__ = Scraper
+ Scraper.__init__(self, url)
+
+ def largest_image_url(self):
+ if not self.content:
+ self.download()
+
+ if not self.content:
+ return None
+
+ m = gootube_thumb_rx.match(self.content)
+ if m:
+ image_url = m.groups()[0]
+ image_url = utils.safe_eval_str(image_url)
+ return image_url
+
+ def media_object(self):
+ return self.media_template % self.video_id
+
+scrapers = {'youtube.com': YoutubeScraper,
+ 'video.google.com': GootubeScraper}
+
+youtube_in_google_rx = re.compile('.*href="(http://[^"]*youtube.com/watch[^"]+).*', re.S)
+
+def make_scraper(url):
+ scraper = scrapers.get(utils.domain(url), Scraper)
+
+ #sometimes youtube scrapers masquerade as google scrapers
+ if scraper == GootubeScraper:
+ h = Scraper(url)
+ h.download()
+ m = youtube_in_google_rx.match(h.content)
+ if m:
+ youtube_url = m.groups()[0]
+ log.debug('%s is really %s' % (url, youtube_url))
+ url = youtube_url
+ return make_scraper(url)
+ return scraper(url)
+
+def test():
+ from r2.lib.pool2 import WorkQueue
+ jobs = []
+ f = open('/tmp/testurls.txt')
+ for url in f:
+ if url.startswith('#'):
+ continue
+ if url.startswith('/info'):
+ continue
+
+ def make_job(url):
+ def fetch(url):
+ print 'START', url
+ url = url.strip()
+ h = make_scraper(url)
+ image_url = h.largest_image_url()
+ print 'DONE', image_url
+ return lambda: fetch(url)
+
+ jobs.append(make_job(url))
+
+ print jobs[0]()
+ #wq = WorkQueue(jobs)
+ #wq.start()
+
+if __name__ == '__main__':
+ test()
diff --git a/r2/r2/lib/utils/utils.py b/r2/r2/lib/utils/utils.py
index e5f31b86e..88fd269b3 100644
--- a/r2/r2/lib/utils/utils.py
+++ b/r2/r2/lib/utils/utils.py
@@ -742,3 +742,6 @@ def vote_hash(user, thing, note='valid'):
def valid_vote_hash(hash, user, thing):
return True
+
+def safe_eval_str(unsafe_str):
+ return unsafe_str.replace('\\x3d', '=').replace('\\x26', '&')
diff --git a/r2/r2/lib/workqueue.py b/r2/r2/lib/workqueue.py
new file mode 100644
index 000000000..62716889a
--- /dev/null
+++ b/r2/r2/lib/workqueue.py
@@ -0,0 +1,90 @@
+# The contents of this file are subject to the Common Public Attribution
+# License Version 1.0. (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
+# License Version 1.1, but Sections 14 and 15 have been added to cover use of
+# software over a computer network and provide for limited attribution for the
+# Original Developer. In addition, Exhibit A has been modified to be consistent
+# with Exhibit B.
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
+# the specific language governing rights and limitations under the License.
+#
+# The Original Code is Reddit.
+#
+# The Original Developer is the Initial Developer. The Initial Developer of the
+# Original Code is CondeNet, Inc.
+#
+# All portions of the code written by CondeNet are Copyright (c) 2006-2008
+# CondeNet, Inc. All Rights Reserved.
+################################################################################
+
+from pylons import g
+from Queue import Queue, Empty
+from threading import Thread
+from datetime import datetime, timedelta
+import time
+
+log = g.log
+
+class WorkQueue(object):
+ """A WorkQueue is a queue that takes a number of functions and runs
+ them in parallel"""
+
+ def __init__(self, jobs, num_workers = 5, timeout = 30):
+ """Creates a WorkQueue that will process jobs with num_workers
+ threads. If a job takes longer than timeout seconds to run, WorkQueue
+ won't wait for it to finish before claiming to be finished."""
+ self.jobs = Queue()
+ self.work_count = Queue(num_workers)
+ self.workers = {}
+ self.timeout = timedelta(seconds = timeout)
+
+ for j in jobs:
+ self.jobs.put(j)
+
+ def monitor(self):
+ done = False
+ while not done:
+ if self.jobs.empty() and not self.workers:
+ done = True
+
+ for worker, start_time in self.workers.items():
+ if (not worker.isAlive() or
+ datetime.now() - start_time > self.timeout):
+ self.work_count.get_nowait()
+ self.jobs.task_done()
+ del self.workers[worker]
+
+ time.sleep(1)
+
+ def start(self):
+ monitor_thread = Thread(target = self.monitor)
+ monitor_thread.setDaemon(True)
+ monitor_thread.start()
+
+ while not self.jobs.empty():
+ job = self.jobs.get()
+
+ work_thread = Thread(target = job)
+ work_thread.setDaemon(True)
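+ #work_count is a bounded queue acting as a semaphore: put() blocks while num_workers jobs are running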
+ self.work_count.put(True)
+ self.workers[work_thread] = datetime.now()
+ work_thread.start()
+
+if __name__ == '__main__':
+ def make_job(n):
+ import random, time
+ def job():
+ print 'starting %s' % n
+ time.sleep(random.randint(1, 10))
+ print 'ending %s' % n
+ return job
+
+ jobs = [make_job(n) for n in xrange(10)]
+ wq = WorkQueue(jobs, timeout = 2)
+ wq.start()
+ wq.jobs.join()
+ print 'DONE'
+
diff --git a/r2/r2/lib/wrapped.py b/r2/r2/lib/wrapped.py
index f9615e138..cbbe988dd 100644
--- a/r2/r2/lib/wrapped.py
+++ b/r2/r2/lib/wrapped.py
@@ -54,8 +54,7 @@ class Wrapped(object):
def __repr__(self):
- return '<%s %s %s>' % (self.__class__.__name__,
- self.lookups, self.context)
+ return '<%s %s>' % (self.__class__.__name__, self.lookups)
def template(self, style = 'html'):
diff --git a/r2/r2/models/account.py b/r2/r2/models/account.py
index 86bb94e71..ce43a7ad8 100644
--- a/r2/r2/models/account.py
+++ b/r2/r2/models/account.py
@@ -58,6 +58,7 @@ class Account(Thing):
spammer = 0,
sort_options = {},
has_subscribed = False,
+ pref_media = 'off',
)
def karma(self, kind, sr = None):
diff --git a/r2/r2/models/link.py b/r2/r2/models/link.py
index 8fb1d758e..ca628cd9f 100644
--- a/r2/r2/models/link.py
+++ b/r2/r2/models/link.py
@@ -45,6 +45,8 @@ class Link(Thing, Printable):
reported = 0, num_comments = 0,
moderator_banned = False,
banned_before_moderator = False,
+ media_object = None,
+ has_thumbnail = False,
ip = '0.0.0.0')
def __init__(self, *a, **kw):
@@ -201,6 +203,7 @@ class Link(Thing, Printable):
wrapped.show_spam,
wrapped.show_reports,
wrapped.can_ban,
+ wrapped.thumbnail,
wrapped.moderator_banned))
s = ''.join(s)
return s
@@ -216,6 +219,8 @@ class Link(Thing, Printable):
@classmethod
def add_props(cls, user, wrapped):
from r2.lib.count import incr_counts
+ from r2.lib.media import thumbnail_url
+
saved = Link._saved(user, wrapped) if user else {}
hidden = Link._hidden(user, wrapped) if user else {}
#clicked = Link._clicked(user, wrapped) if user else {}
@@ -223,6 +228,17 @@ class Link(Thing, Printable):
for item in wrapped:
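+ #'on' always shows thumbnails, 'off' never does, and 'subreddit' defers to the subreddit's show_media setting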
+ show_media = (c.user.pref_media == 'on' or
+ (c.user.pref_media == 'subreddit' and
+ item.subreddit.show_media))
+
+ if not show_media:
+ item.thumbnail = ""
+ elif item.has_thumbnail:
+ item.thumbnail = thumbnail_url(item)
+ else:
+ item.thumbnail = g.default_thumb
+
item.score = max(0, item.score)
item.domain = (domain(item.url) if not item.is_self
diff --git a/r2/r2/models/subreddit.py b/r2/r2/models/subreddit.py
index eff79cf3f..c2f329325 100644
--- a/r2/r2/models/subreddit.py
+++ b/r2/r2/models/subreddit.py
@@ -43,6 +43,7 @@ class Subreddit(Thing, Printable):
ad_file = os.path.join(g.static_path, 'ad_default.html'),
reported = 0,
valid_votes = 0,
+ show_media = False,
)
@classmethod
diff --git a/r2/r2/public/static/link.js b/r2/r2/public/static/link.js
index 4c7754691..424aceab9 100644
--- a/r2/r2/public/static/link.js
+++ b/r2/r2/public/static/link.js
@@ -131,9 +131,11 @@ Thing.prototype = {
compute_height:function() {
var arrows = this.$("arrows");
var entry = this.$("entry");
+ var thumb = this.$("thumbnail");
var num = this.$("num");
return Math.max(arrows ? arrows.offsetHeight : 0,
entry ? entry.offsetHeight : 0,
+ thumb ? thumb.offsetHeight : 0,
num ? num.offsetHeight : 0);
},
@@ -151,7 +153,8 @@ Thing.prototype = {
set_height: function(h) {
var entry = this.$('entry');
- var arrows = this.$('arrows');
+ var thumb = this.$('thumbnail');
+ var arrows = this.$('arrows');
var num = this.$('num');
if(h == "fit" ||
(this.max_height() && h >= this.max_height() *.90 )) {
@@ -190,6 +193,7 @@ Thing.prototype = {
}
entry.style.height = h;
if(arrows) { arrows.style.height = h; }
+ if(thumb) { thumb.style.height = h; }
if(num) {
if (h)
num.style.marginTop = 0;
@@ -457,14 +461,15 @@ function linkstatus(form) {
return _global_fetching_tag;
}
-function setClick(a) {
+function setClick(a, css_class) {
+ css_class = css_class || "title";
var id = _id(a);
if (id) {
if(logged) {
- a.className = "title loggedin click";
+ a.className = css_class + " loggedin click";
}
else {
- a.className = "title click";
+ a.className = css_class + " click";
}
setClickCookie(id);
}
diff --git a/r2/r2/public/static/reddit-button-play.gif b/r2/r2/public/static/reddit-button-play.gif
new file mode 100644
index 000000000..8ab0e7472
Binary files /dev/null and b/r2/r2/public/static/reddit-button-play.gif differ
diff --git a/r2/r2/public/static/reddit-button-stop.gif b/r2/r2/public/static/reddit-button-stop.gif
new file mode 100644
index 000000000..13af313b5
Binary files /dev/null and b/r2/r2/public/static/reddit-button-stop.gif differ
diff --git a/r2/r2/public/static/reddit.css b/r2/r2/public/static/reddit.css
index 497cabedf..6638a00e4 100644
--- a/r2/r2/public/static/reddit.css
+++ b/r2/r2/public/static/reddit.css
@@ -26,9 +26,14 @@ h2 a:visited { color: #369 }
h2 a:hover { text-decoration: underline }
h3 { font-size:110%; /*text-transform:uppercase;*/ }
-a img { border:none }
+a img { border: 0 none; }
a { text-decoration: none; color: #369; }
+/*
+a:active { border: 0 none;}
+a:focus { -moz-outline-style: none; }
+*/
+
div.autosize { display: table; width: 1px}
div.autosize > div { display: table-cell; }
@@ -59,8 +64,6 @@ input.txt {
/* header / menus */
-/*:-moz-any-link:focus { outline: none }*/
-
.hover a:hover { text-decoration: underline }
.selected { font-weight: bold; }
@@ -498,6 +501,17 @@ before enabling */
.tagline a.friend {color: orangered }
.tagline a:hover { text-decoration: underline }
+.watch-play {
+ background: transparent url(/static/reddit-button-play.gif) no-repeat scroll right center;
+ padding-right: 15px;
+ color: #336699;
+}
+.watch-stop {
+ color: red;
+}
+
+.embededmedia { margin-top: 5px }
+
.title { color: blue; padding: 0px; overflow: hidden; }
.title:visited { color: #551a8b }
.title.click { color: #551a8b }
@@ -1213,8 +1227,14 @@ a.star { text-decoration: none; color: #ff8b60 }
#passform.pretty-form button { padding: 0px 1px; }
-.prefleft { padding: 10px; font-weight: bold; vertical-align: top}
-.prefright { padding: 10px }
+.preftable th {
+ padding: 10px;
+ font-weight: bold;
+ vertical-align: top;
+ text-align: left;
+}
+.preftable td.prefright { padding: 10px }
+.preftable .spacer { margin-top: 5px; margin-bottom: 5px; }
.over18 button { margin: 0 10px 0 10px; padding: 5px}
@@ -1237,3 +1257,9 @@ a.star { text-decoration: none; color: #ff8b60 }
color: #369; font-weight: bold;}
.stats td.ri { padding-left: 20px; text-align: right}
+.thumbnail {
+ float: left;
+ margin: 0px 5px;
+ overflow: hidden;
+}
+
diff --git a/r2/r2/public/static/utils.js b/r2/r2/public/static/utils.js
index b2de1b23f..9f98fe6e3 100644
--- a/r2/r2/public/static/utils.js
+++ b/r2/r2/public/static/utils.js
@@ -222,7 +222,7 @@ function handleResponse(action) {
if(field) {
for(var i in u) {
if(typeof(u[i]) != "function" && u != 'name') {
- field[i] = u[i];
+ field[i] = unsafe(u[i]);
}
} }});
my_iter(r.hide,
@@ -343,3 +343,23 @@ function more(a_tag, new_label, div_on, div_off) {
function new_captcha() {
redditRequest("new_captcha");
}
+
+function view_embeded_media(id, media_link) {
+ var eid = "embeded_media_" + id;
+ var watchid = "view_embeded_media_span_watch_" + id;
+ var closeid = "view_embeded_media_span_close_" + id;
+ var watchspan = document.getElementById(watchid);
+ var closespan = document.getElementById(closeid);
+ var e = document.getElementById(eid);
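+  // toggle between the collapsed "watch" control and the inlined player markup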
+ if (e.style.display == "none") {
+ e.style.display = "block";
+ e.innerHTML = media_link;
+ watchspan.style.display = "none";
+ closespan.style.display = "inline";
+ } else {
+ e.style.display = "none";
+ watchspan.style.display = "inline";
+ closespan.style.display = "none";
+ }
+
+}
\ No newline at end of file
diff --git a/r2/r2/templates/commentreplybox.html b/r2/r2/templates/commentreplybox.html
index fbae92499..1b5eaf519 100644
--- a/r2/r2/templates/commentreplybox.html
+++ b/r2/r2/templates/commentreplybox.html
@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
-## CondeNet, Inc. All Rights Reserved.
+## CondeNet, Inc. All Rights Reserved."
################################################################################
<%namespace file="utils.html" import="error_field"/>
diff --git a/r2/r2/templates/createsubreddit.html b/r2/r2/templates/createsubreddit.html
index 35f4d08cd..065980c00 100644
--- a/r2/r2/templates/createsubreddit.html
+++ b/r2/r2/templates/createsubreddit.html
@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
-## CondeNet, Inc. All Rights Reserved.
+## CondeNet, Inc. All Rights Reserved."
################################################################################
<%!
@@ -67,7 +67,7 @@ function update_title() {
%if thing.site:
%endif
-
+
@@ -220,7 +220,7 @@ function update_title() {
-
+
${radio_type(_("public"), _("anyone can view and submit"))}
${radio_type(_("restricted"), _("anyone can view, but only contributors can submit links"))}
${radio_type(_("private"), _("only contributors can view and submit"))}
@@ -237,6 +237,17 @@ function update_title() {
+
+
+
+
+
+
+
diff --git a/r2/r2/templates/link.html b/r2/r2/templates/link.html
index 1d0603302..837f11b7a 100644
--- a/r2/r2/templates/link.html
+++ b/r2/r2/templates/link.html
@@ -1,4 +1,4 @@
-## "The contents of this file are subject to the Common Public Attribution
+## The contents of this file are subject to the Common Public Attribution
## License Version 1.0. (the "License"); you may not use this file except in
## compliance with the License. You may obtain a copy of the License at
## http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
@@ -20,7 +20,9 @@
## CondeNet, Inc. All Rights Reserved.
################################################################################
-<%! from r2.models.subreddit import Default %>
+<%!
+ from r2.models.subreddit import Default
+ %>
<%inherit file="printable.html"/>
@@ -39,12 +41,10 @@
%def>
-<%def name="entry()">
-<% fullname = thing._fullname %>
-
+ %endif
+%def>
diff --git a/r2/r2/templates/link.xml b/r2/r2/templates/link.xml
index cb66c4e26..86203e449 100644
--- a/r2/r2/templates/link.xml
+++ b/r2/r2/templates/link.xml
@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
-## CondeNet, Inc. All Rights Reserved.
+## CondeNet, Inc. All Rights Reserved."
################################################################################
<%!
diff --git a/r2/r2/templates/organiclisting.html b/r2/r2/templates/organiclisting.html
index e92a23b0c..76a6b6d8d 100644
--- a/r2/r2/templates/organiclisting.html
+++ b/r2/r2/templates/organiclisting.html
@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
-## CondeNet, Inc. All Rights Reserved.
+## CondeNet, Inc. All Rights Reserved."
################################################################################
<%namespace file="help.html" import="help_or_hide"/>
diff --git a/r2/r2/templates/prefoptions.html b/r2/r2/templates/prefoptions.html
index dc2b96a3c..3f82f98a9 100644
--- a/r2/r2/templates/prefoptions.html
+++ b/r2/r2/templates/prefoptions.html
@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
-## CondeNet, Inc. All Rights Reserved.
+## CondeNet, Inc. All Rights Reserved."
################################################################################
<%namespace file="utils.html" import="language_tool, language_checkboxes"/>
@@ -34,11 +34,20 @@
<%def name="link_options()">
%def>
+<%def name="media_radio(val, label)">
+
+
+
+%def>
<%def name="num_input(s, name)">
${checkbox(_("display links with a reddit toolbar"), "frame")}
${checkbox(_("open links in a new window"), "newwindow")}
-
-
${_("link options")}
+
+
${_("media")}
+
+ %if not c.user.pref_compress:
+ ${media_radio("on", _("show thumbnails next to links"))}
+ ${media_radio("off", _("don't show thumbnails next to links"))}
+ ${media_radio("subreddit", _("show thumbnails based on that reddit's media preferences"))}
+ %else:
+
${_("to enable thumbnails, disable compressed link display")}
+
+ %endif
+
+
+
+
${_("link options")}
${checkbox(_("show me new links on the front page"), "organic")}
@@ -108,8 +130,8 @@
${_("(blank for none)")}
${checkbox(_("make my votes public"), "public_votes")}
diff --git a/r2/r2/templates/printable.html b/r2/r2/templates/printable.html
index 13ee00f01..a81bb1389 100644
--- a/r2/r2/templates/printable.html
+++ b/r2/r2/templates/printable.html
@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
-## CondeNet, Inc. All Rights Reserved.
+## CondeNet, Inc. All Rights Reserved."
################################################################################
<%!
@@ -173,8 +173,6 @@ ${self.RenderPrintable()}
%def>
-
-
<%def name="score(this, likes=None, inline=True, label = True, _id = True)">
<%
tag = "span" if inline else "div"
@@ -254,6 +252,12 @@ ${state_button(name, fullname, title, onclick, executed,
${title}
%def>
+<%def name="advanced_button(fullname, args, title, nameFunc=None)">
+\
+ ${title}
+%def>
+
<%def name="tags(**kw)">
%for k, v in kw.iteritems():
%if v is not None:
diff --git a/r2/r2/templates/utils.html b/r2/r2/templates/utils.html
index 4539a401c..aa2d55d96 100644
--- a/r2/r2/templates/utils.html
+++ b/r2/r2/templates/utils.html
@@ -1,4 +1,4 @@
-## "The contents of this file are subject to the Common Public Attribution
+## "The contents of this file are subject to the Common Public Attribution.
## License Version 1.0. (the "License"); you may not use this file except in
## compliance with the License. You may obtain a copy of the License at
## http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
-## CondeNet, Inc. All Rights Reserved.
+## CondeNet, Inc. All Rights Reserved."
################################################################################
<%!