add media features

This commit is contained in:
shuffman
2008-07-02 17:27:12 -07:00
parent 7bf5fab1fa
commit bcca862eaa
29 changed files with 810 additions and 62 deletions

View File

@@ -52,12 +52,17 @@ default_sr = localhost
admins =
page_cache_time = 30
static_path = /static/
useragent = Mozilla/5.0 (compatible; bot/1.0; ChangeMe)
solr_url =
SECRET = abcdefghijklmnopqrstuvwxyz0123456789
MODSECRET = abcdefghijklmnopqrstuvwxyz0123456789
ip_hash =
S3KEY_ID = ABCDEFGHIJKLMNOP1234
S3SECRET_KEY = aBcDeFgHiJkLmNoPqRsTuVwXyZ1234567890AbCd
s3_thumb_bucket = /your.bucket.here/
default_thumb = /static/noimage.png
MIN_DOWN_LINK = 0
MIN_UP_KARMA = 0
@@ -69,6 +74,10 @@ MIN_RATE_LIMIT_COMMENT_KARMA = 0
MODWINDOW = 2
HOT_PAGE_AGE = 1
#
media_period = 10 minutes
rising_period = 12 hours
# time of ratelimit purgatory (min)
RATELIMIT = 10

View File

@@ -657,6 +657,7 @@ class ApiController(RedditController):
ad_file = nop("ad_file"),
sr = VByName('sr'),
over_18 = VBoolean('over_18'),
show_media = VBoolean('show_media'),
type = VOneOf('type', ('public', 'private', 'restricted'))
)
def POST_site_admin(self, res, name ='', sr = None, **kw):
@@ -665,7 +666,7 @@ class ApiController(RedditController):
kw = dict((k, v) for k, v in kw.iteritems()
if v is not None
and k in ('name', 'title', 'description', 'firsttext',
'static_path', 'ad_file', 'over_18',
'static_path', 'ad_file', 'over_18', 'show_media',
'type', 'header', 'lang', 'stylesheet'))
#if a user is banned, return rate-limit errors

View File

@@ -177,7 +177,8 @@ class HotController(FixListing, ListingController):
o_links, pos = organic.organic_links(c.user)
if o_links:
# get links in proximity to pos
disp_links = [o_links[(i + pos) % len(o_links)] for i in xrange(-2, 8)]
l = min(len(o_links) - 3, 8)
disp_links = [o_links[(i + pos) % len(o_links)] for i in xrange(-2, l)]
b = IDBuilder(disp_links,
wrap = self.builder_wrapper)

View File

@@ -59,6 +59,7 @@ class PostController(ApiController):
pref_over_18 = VBoolean('over_18'),
pref_numsites = VInt('numsites', 1, 100),
pref_lang = VLang('lang'),
pref_media = VOneOf('media', ('on', 'off', 'subreddit')),
pref_compress = VBoolean('compress'),
pref_min_link_score = VInt('min_link_score', -100, 100),
pref_min_comment_score = VInt('min_comment_score', -100, 100),

View File

@@ -21,7 +21,7 @@
################################################################################
from __future__ import with_statement
from pylons import config
import pytz, os
import pytz, os, logging, sys
from datetime import timedelta
from r2.lib.cache import LocalCache, Memcache, CacheChain
from r2.lib.db.stats import QueryStats
@@ -146,9 +146,16 @@ class Globals(object):
full_name = os.path.join(log_path, fname)
os.remove(full_name)
#setup the logger
self.log = logging.getLogger('reddit')
if self.debug:
self.log.setLevel(logging.DEBUG)
self.log.addHandler(logging.StreamHandler())
def __del__(self):
"""
Put any cleanup code to be run when the application finally exits
here.
"""
pass

View File

@@ -525,6 +525,8 @@ class Client(local):
try:
for key in server_keys[server]: # These are mangled keys
store_info = self._val_to_store_info(mapping[prefixed_to_orig_key[key]], min_compress_len)
if not store_info:
continue
write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0], time, store_info[1], store_info[2]))
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
@@ -568,10 +570,11 @@ class Client(local):
min_compress_len = 0
else:
flags |= Client._FLAG_PICKLE
val = pickle.dumps(val, 0) # Ack! JLR hacks it so that LinkedDict unpicling works w/o figuring out __reduce__.
val = pickle.dumps(val, -1) # Ack! JLR hacks it so that LinkedDict unpicling works w/o figuring out __reduce__.
# silently do not store if value length exceeds maximum
if len(val) >= SERVER_MAX_VALUE_LENGTH: return(0)
if len(val) >= SERVER_MAX_VALUE_LENGTH:
return (0)
lv = len(val)
# We should try to compress if min_compress_len > 0 and we could import zlib and this string is longer than our min threshold.
@@ -593,6 +596,8 @@ class Client(local):
self._statlog(cmd)
store_info = self._val_to_store_info(val, min_compress_len)
if not store_info:
return 0
fullcmd = "%s %s %d %d %d\r\n%s" % (cmd, key, store_info[0], time, store_info[1], store_info[2])
try:

View File

@@ -22,18 +22,21 @@
from r2.models import Link, Subreddit
from r2.lib import utils
from pylons import g
count_period = g.rising_period
#stubs
def incr_counts(wrapped):
pass
def get_link_counts(period = '12 hours'):
def get_link_counts(period = count_period):
links = Link._query(Link.c._date >= utils.timeago(period),
limit=50, data = True)
return dict((l._fullname, (0, l.sr_id)) for l in links)
def get_sr_counts(period = '12 hours'):
def get_sr_counts(period = count_period):
    """Stub: return a map of subreddit fullname -> (count, sr_id) with
    zeroed counts, mirroring get_link_counts."""
    srs = Subreddit._query()
    # BUG FIX: this previously iterated over the undefined name `links`
    # (copy-paste from get_link_counts), which raised NameError when called.
    # NOTE(review): `l.sr_id` on a Subreddit also looks copied from the Link
    # version -- confirm the intended attribute.
    return dict((l._fullname, (0, l.sr_id)) for l in srs)

112
r2/r2/lib/media.py Normal file
View File

@@ -0,0 +1,112 @@
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is Reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of the
# Original Code is CondeNet, Inc.
#
# All portions of the code written by CondeNet are Copyright (c) 2006-2008
# CondeNet, Inc. All Rights Reserved.
################################################################################
from pylons import g, config
from r2.models.link import Link
from r2.lib.workqueue import WorkQueue
from r2.lib import s3cp
from r2.lib.utils import timeago, fetch_things2
from r2.lib.db.operators import desc
from r2.lib.scraper import make_scraper
import tempfile
from Queue import Queue
s3_thumbnail_bucket = g.s3_thumb_bucket
media_period = g.media_period
threads = 20
log = g.log
def thumbnail_url(link):
    """Return the public URL of a link's thumbnail, derived from its fullname.

    NOTE(review): 'http:/' has a single slash on purpose -- the configured
    bucket (g.s3_thumb_bucket) is expected to carry leading/trailing slashes
    (see the ini default '/your.bucket.here/'); confirm before changing.
    """
    return ''.join(('http:/', s3_thumbnail_bucket, link._fullname, '.png'))
def upload_thumb(link, image):
    """Upload a link's thumbnail image to S3.

    The image is saved to a temporary .png file which is then sent to the
    thumbnail bucket (named after the link's fullname) with a public-read
    ACL.  The temp file is removed automatically when it is closed.
    """
    f = tempfile.NamedTemporaryFile(suffix = '.png')
    image.save(f)
    # BUG FIX: flush Python's file buffer before curl (spawned by
    # s3cp.send_file) reads f.name from disk -- without this the tail of
    # the image could still be sitting in the unwritten buffer.
    f.flush()
    resource = s3_thumbnail_bucket + link._fullname + '.png'
    log.debug('uploading to s3: %s' % link._fullname)
    s3cp.send_file(f.name, resource, 'image/png', 'public-read', None, False)
    log.debug('thumbnail %s: %s' % (link._fullname, thumbnail_url(link)))
def make_link_info_job(results, link, useragent):
    """Build a deferred unit of work for a work queue.

    The returned callable scrapes the link's url for a thumbnail and a
    media object, uploads the thumbnail (if any) to S3, and stores the
    (thumbnail, media_object) pair in the shared ``results`` dict keyed
    by the link.
    """
    def _work():
        s = make_scraper(link.url)
        thumb = s.thumbnail()
        media = s.media_object()
        if thumb:
            upload_thumb(link, thumb)
        results[link] = (thumb, media)
    return _work
def update_link(link, thumbnail, media_object):
    """Persist a link's media attributes in the database.

    Falsy values are skipped rather than written, so a failed scrape never
    clears existing attributes; the link is committed either way.
    """
    if media_object:
        link.media_object = media_object
    if thumbnail:
        link.has_thumbnail = True
    link._commit()
def process_new_links(period = media_period, force = False):
    """Fetch links submitted within `period` and set their media
    properties (thumbnail, media object).

    Self-posts are skipped, as are links that already have media
    properties unless `force` is True.  Scraping and uploading run on a
    WorkQueue in parallel; the database writes happen afterwards in this
    thread.
    """
    links = Link._query(Link.c._date > timeago(period), sort = desc('_date'),
                        data = True)
    results = {}
    jobs = []
    for link in fetch_things2(links):
        if link.is_self:
            continue
        if not force and (link.has_thumbnail or link.media_object):
            continue
        jobs.append(make_link_info_job(results, link, g.useragent))
    # send links to a queue; use the module-level `threads` constant instead
    # of a second hard-coded worker count (both were 20, now one knob)
    wq = WorkQueue(jobs, num_workers = threads)
    wq.start()
    wq.jobs.join()
    # when the queue is finished, do the db writes in this thread
    for link, info in results.items():
        update_link(link, info[0], info[1])
def set_media(link):
    """Scrape and persist media properties for one link, synchronously."""
    scraped = {}
    job = make_link_info_job(scraped, link, g.useragent)
    job()
    thumbnail, media_object = scraped[link]
    update_link(link, thumbnail, media_object)

100
r2/r2/lib/s3cp.py Normal file
View File

@@ -0,0 +1,100 @@
#!/usr/bin/env python
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is Reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of the
# Original Code is CondeNet, Inc.
#
# All portions of the code written by CondeNet are Copyright (c) 2006-2008
# CondeNet, Inc. All Rights Reserved.
################################################################################
import base64, hmac, sha, os, sys, getopt
from datetime import datetime
from pylons import g,config
KEY_ID = g.S3KEY_ID
SECRET_KEY = g.S3SECRET_KEY
class S3Exception(Exception): pass
def make_header(verb, date, amz_headers, resource, content_type):
    """Compute the AWS S3 Authorization signature for a request.

    Canonicalizes the x-amz-* headers (lowercased names, sorted order,
    'name:value' lines), joins the request elements per the S3 REST
    authentication scheme, and returns the base64-encoded HMAC-SHA1 of
    that string using SECRET_KEY.
    """
    content_md5 = ''  # this client never supplies a Content-MD5
    # canonicalize the amazon headers
    canonical = dict((k.lower(), v) for k, v in amz_headers.iteritems())
    amz_str = '\n'.join('%s:%s' % (k, canonical[k])
                        for k in sorted(canonical))
    to_sign = '\n'.join((verb,
                         content_md5,
                         content_type,
                         date,
                         amz_str,
                         resource))
    mac = hmac.new(SECRET_KEY, to_sign, sha)
    return base64.encodestring(mac.digest()).strip()
def send_file(filename, resource, content_type, acl, rate, meter):
    """PUT a local file to S3 by shelling out to curl.

    filename     -- local path of the file to upload
    resource     -- S3 resource path (e.g. '/bucket/key')
    content_type -- sent as Content-Type when non-empty
    acl          -- value for the x-amz-acl header
    rate         -- optional value for curl's --limit-rate
    meter        -- when true, write curl's progress output to
                    's3cp.output'; otherwise run curl silently (-s)

    Raises S3Exception carrying curl's exit code on failure.
    """
    date = datetime.utcnow().strftime("%a, %d %b %Y %X GMT")
    amz_headers = {'x-amz-acl': acl}
    signature = make_header('PUT', date, amz_headers, resource, content_type)
    args = ['-T', filename,
            '-H', 'x-amz-acl: %s' % amz_headers['x-amz-acl'],
            '-H', 'Authorization: AWS %s:%s' % (KEY_ID, signature),
            '-H', 'Date: %s' % date]
    if content_type:
        args.extend(['-H', 'Content-Type: %s' % content_type])
    if rate:
        args.extend(['--limit-rate', rate])
    if meter:
        args.extend(['-o', 's3cp.output'])
    else:
        args.append('-s')
    args.append('https://s3.amazonaws.com%s' % resource)
    status = os.spawnlp(os.P_WAIT, 'curl', 'curl', *args)
    if status:
        raise S3Exception(status)
if __name__ == '__main__':
    # usage: s3cp.py [-c content_type] [-a acl] [-l rate] [-m] FILE RESOURCE
    options = "a:c:l:m"
    try:
        opts, args = getopt.getopt(sys.argv[1:], options)
    except getopt.GetoptError:
        # catch only the parse error (a bare except also swallowed
        # KeyboardInterrupt); keep the historical exit code but say why
        sys.stderr.write("usage: s3cp.py [-c type] [-a acl] [-l rate] [-m] "
                         "file resource\n")
        sys.exit(2)
    if len(args) < 2:
        # previously this crashed with an IndexError on args[0]/args[1]
        sys.stderr.write("s3cp.py: need a source file and an s3 resource\n")
        sys.exit(2)
    opts = dict(opts)
    send_file(args[0], args[1],
              opts.get('-c', ''),
              opts.get('-a', 'private'),
              opts.get('-l'),
              '-m' in opts)

265
r2/r2/lib/scraper.py Normal file
View File

@@ -0,0 +1,265 @@
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is Reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of the
# Original Code is CondeNet, Inc.
#
# All portions of the code written by CondeNet are Copyright (c) 2006-2008
# CondeNet, Inc. All Rights Reserved.
################################################################################
from pylons import g
from r2.lib import utils
from r2.lib.memoize import memoize
from urllib2 import Request, HTTPError, URLError, urlopen
import urlparse, re, urllib, logging, StringIO, logging
import Image, ImageFile
log = g.log
useragent = g.useragent
chunk_size = 1024
thumbnail_size = 70, 70
def image_to_str(image):
    """Serialize a PIL image to a raw byte string in its own format."""
    buf = StringIO.StringIO()
    image.save(buf, image.format)
    return buf.getvalue()
def str_to_image(s):
    """Deserialize a raw byte string (as produced by image_to_str) into a
    PIL Image."""
    buf = StringIO.StringIO(s)
    buf.seek(0)
    return Image.open(buf)
@memoize('media.fetch_url')
def fetch_url(url, referer = None, retries = 1, dimension = False):
    """Fetch a url, returning (content_type, content).

    When `dimension` is True, download only enough of an image to parse
    its dimensions and return the (width, height) tuple instead; returns
    None in that mode if the url is not an image or cannot be parsed.
    On failure after `retries` attempts, returns None (dimension mode)
    or (None, None).  Results are memoized under 'media.fetch_url'.
    """
    cur_try = 0
    #log.debug('fetching: %s' % url)
    # sentinel for failure: a single None in dimension mode, a pair otherwise
    nothing = None if dimension else (None, None)
    while True:
        try:
            req = Request(url)
            if useragent:
                req.add_header('User-Agent', useragent)
            if referer:
                req.add_header('Referer', referer)

            open_req = urlopen(req)

            #if we only need the dimension of the image, we may not
            #need the entire image
            if dimension:
                content = open_req.read(chunk_size)
            else:
                content = open_req.read()
            content_type = open_req.headers.get('content-type')

            if 'image' in content_type:
                # feed chunks to PIL's incremental parser until it can
                # determine the image header (or the stream ends)
                p = ImageFile.Parser()
                new_data = content
                while not p.image and new_data:
                    p.feed(new_data)
                    new_data = open_req.read(chunk_size)
                    content += new_data

                #return the size, or return the data
                if dimension and p.image:
                    return p.image.size
                elif dimension:
                    return nothing
            elif dimension:
                #expected an image, but didn't get one
                return nothing

            return content_type, content

        except (URLError, HTTPError), e:
            cur_try += 1
            if cur_try >= retries:
                log.debug('error while fetching: %s referer: %s' % (url, referer))
                log.debug(e)
                return nothing
        finally:
            # close the connection whether we returned, raised, or retry
            if 'open_req' in locals():
                open_req.close()
# matches the src attribute of <img> tags (single, double, or no quotes)
img_rx = re.compile(r'<\s*(?:img)[^>]*src\s*=\s*[\"\']?([^\"\'\s>]*)[^>]*', re.IGNORECASE | re.S)

def image_urls(base_url, html):
    """Yield an absolute url for every <img src=...> found in html,
    resolved against base_url."""
    for src in img_rx.findall(html):
        yield urlparse.urljoin(base_url, src)
class Scraper:
    """Generic page scraper: downloads a url, finds its largest suitable
    image, and can turn that image into a thumbnail.  Subclasses override
    largest_image_url/media_object for known media sites."""

    def __init__(self, url):
        self.url = url
        self.content = None        # raw page body, filled by download()
        self.content_type = None   # content-type header, filled by download()

    def download(self):
        """Fetch the url's content and content-type (see fetch_url)."""
        self.content_type, self.content = fetch_url(self.url)

    def largest_image_url(self):
        """Return the url of the largest reasonably-shaped image on the
        page, or None if the download failed or nothing qualifies."""
        if not self.content:
            self.download()
        #if download didn't work
        if not self.content:
            return None

        max_area = 0
        max_url = None

        #if the original url was an image, use that
        if 'image' in self.content_type:
            urls = [self.url]
        else:
            urls = image_urls(self.url, self.content)

        for image_url in urls:
            # dimension-only fetch: reads just enough to get (w, h)
            size = fetch_url(image_url, referer = self.url, dimension = True)
            if not size:
                continue

            area = size[0] * size[1]

            #ignore little images
            if area < 5000:
                log.debug('ignore little %s' % image_url)
                continue

            #ignore excessively long/wide images
            # NOTE(review): size values are ints, so under Python 2 this is
            # integer division -- a 3:2 image gives 1, not 1.5, and passes.
            # Confirm whether float ratio was intended.
            if max(size) / min(size) > 1.5:
                log.debug('ignore dimensions %s' % image_url)
                continue

            if area > max_area:
                max_area = area
                max_url = image_url

        return max_url

    def thumbnail(self):
        """Download the largest image and shrink it in place to
        thumbnail_size; returns a PIL Image or None."""
        image_url = self.largest_image_url()
        if image_url:
            content_type, image_str = fetch_url(image_url, referer = self.url)
            if image_str:
                image = str_to_image(image_str)
                image.thumbnail(thumbnail_size, Image.ANTIALIAS)
                return image

    def media_object(self):
        """Base pages have no embeddable media object."""
        return None
# extracts the video id from a youtube watch url's v= parameter
youtube_rx = re.compile('.*v=([A-Za-z0-9-_]+).*')

class YoutubeScraper(Scraper):
    """Scraper for youtube.com: thumbnail and embed come straight from the
    video id, no page download needed."""
    # two %s slots: both are the video id
    media_template = '<object width="425" height="350"><param name="movie" value="http://www.youtube.com/v/%s"></param><param name="wmode" value="transparent"></param><embed src="http://www.youtube.com/v/%s" type="application/x-shockwave-flash" wmode="transparent" width="425" height="350"></embed></object>'

    def __init__(self, url):
        m = youtube_rx.match(url)
        if m:
            self.video_id = m.groups()[0]
        else:
            #if it's not a youtube video, just treat it like a normal page
            # (note: reassigning __class__ demotes this instance to the
            # generic Scraper, so its methods apply from here on)
            log.debug('reverting youtube to regular scraper: %s' % url)
            self.__class__ = Scraper
        Scraper.__init__(self, url)

    def largest_image_url(self):
        # youtube serves a predictable thumbnail url per video id
        return 'http://img.youtube.com/vi/%s/default.jpg' % self.video_id

    def media_object(self):
        return self.media_template % (self.video_id, self.video_id)
# extracts the video id from a google video videoplay?docid= url
gootube_rx = re.compile('.*videoplay\?docid=([A-Za-z0-9-_]+).*')
# finds the thumbnail url embedded in the page's player javascript
gootube_thumb_rx = re.compile(".*thumbnail:\s*\'(http://[^/]+/ThumbnailServer2[^\']+)\'.*", re.IGNORECASE | re.S)

class GootubeScraper(Scraper):
    """Scraper for video.google.com: embed comes from the video id, the
    thumbnail is scraped out of the page's javascript."""
    media_template = '<embed style="width:400px; height:326px;" id="VideoPlayback" type="application/x-shockwave-flash" src="http://video.google.com/googleplayer.swf?docId=%s&hl=en" flashvars=""> </embed>'

    def __init__(self, url):
        m = gootube_rx.match(url)
        if m:
            self.video_id = m.groups()[0]
        else:
            # not a recognizable video url: demote to the generic Scraper
            self.__class__ = Scraper
        Scraper.__init__(self, url)

    def largest_image_url(self):
        if not self.content:
            self.download()
        if not self.content:
            return None
        m = gootube_thumb_rx.match(self.content)
        if m:
            image_url = m.groups()[0]
            # the page escapes '=' and '&' as \x3d/\x26; undo that
            image_url = utils.safe_eval_str(image_url)
            return image_url

    def media_object(self):
        return self.media_template % self.video_id
# domain -> specialized scraper; anything else gets the generic Scraper
scrapers = {'youtube.com': YoutubeScraper,
            'video.google.com': GootubeScraper}

# detects a google-video page that is really a re-hosted youtube video
youtube_in_google_rx = re.compile('.*<div class="original-text">.*href="(http://[^"]*youtube.com/watch[^"]+).*', re.S)

def make_scraper(url):
    """Return a Scraper instance appropriate for the url's domain.

    Google-video pages that merely wrap a youtube video are redirected to
    the youtube scraper (one level of recursion).
    """
    scraper = scrapers.get(utils.domain(url), Scraper)
    #sometimes youtube scrapers masquerade as google scrapers
    if scraper == GootubeScraper:
        h = Scraper(url)
        h.download()
        # NOTE(review): h.content is None if the download failed, and
        # match(None) would raise TypeError -- confirm this can't happen
        # or guard it.
        m = youtube_in_google_rx.match(h.content)
        if m:
            youtube_url = m.groups()[0]
            log.debug('%s is really %s' % (url, youtube_url))
            url = youtube_url
            return make_scraper(url)
    return scraper(url)
def test():
    """Manual smoke test: read urls from /tmp/testurls.txt and scrape the
    first one for its largest image.  Lines starting with '#' or '/info'
    are skipped."""
    from r2.lib.pool2 import WorkQueue
    jobs = []
    f = open('/tmp/testurls.txt')
    for url in f:
        if url.startswith('#'):
            continue
        if url.startswith('/info'):
            continue
        # make_job binds url via a parameter so each closure gets its own
        # value rather than the loop variable's final value
        def make_job(url):
            def fetch(url):
                print 'START', url
                url = url.strip()
                h = make_scraper(url)
                image_url = h.largest_image_url()
                print 'DONE', image_url
            return lambda: fetch(url)
        jobs.append(make_job(url))

    # run only the first job synchronously; the queued variant is disabled
    print jobs[0]()

    #wq = WorkQueue(jobs)
    #wq.start()

if __name__ == '__main__':
    test()

View File

@@ -742,3 +742,6 @@ def vote_hash(user, thing, note='valid'):
def valid_vote_hash(hash, user, thing):
return True
def safe_eval_str(unsafe_str):
    """Undo javascript-style hex escaping of '=' and '&': the literal
    sequences '\\x3d' and '\\x26' become '=' and '&' respectively."""
    replacements = (('\\x3d', '='), ('\\x26', '&'))
    for escaped, plain in replacements:
        unsafe_str = unsafe_str.replace(escaped, plain)
    return unsafe_str

90
r2/r2/lib/workqueue.py Normal file
View File

@@ -0,0 +1,90 @@
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is Reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of the
# Original Code is CondeNet, Inc.
#
# All portions of the code written by CondeNet are Copyright (c) 2006-2008
# CondeNet, Inc. All Rights Reserved.
################################################################################
from pylons import g
from Queue import Queue, Empty
from threading import Thread
from datetime import datetime, timedelta
import time
log = g.log
class WorkQueue(object):
    """A WorkQueue is a queue that takes a number of functions and runs
    them in parallel, bounded by a worker count.  A monitor thread reaps
    finished (or timed-out) workers and marks their jobs done so that
    jobs.join() eventually returns."""

    def __init__(self, jobs, num_workers = 5, timeout = 30):
        """Creates a WorkQueue that will process jobs with num_workers
        threads. If a job takes longer than timeout seconds to run, WorkQueue
        won't wait for it to finish before claiming to be finished."""
        self.jobs = Queue()
        # bounded queue used as a semaphore limiting concurrent workers
        self.work_count = Queue(num_workers)
        # worker thread -> start time, used for timeout detection
        self.workers = {}
        self.timeout = timedelta(seconds = timeout)

        for j in jobs:
            self.jobs.put(j)

    def monitor(self):
        """Reap workers that have finished or exceeded the timeout,
        releasing their worker slot and marking their job done."""
        done = False
        while not done:
            if self.jobs.empty() and not self.workers:
                done = True
            # BUG FIX: snapshot the items before deleting entries -- under
            # Python 3 (and for safety generally) mutating a dict while
            # iterating its live view raises RuntimeError.
            for worker, start_time in list(self.workers.items()):
                # is_alive()/.daemon replace the long-deprecated
                # isAlive()/setDaemon() (removed in Python 3.9+)
                if (not worker.is_alive() or
                    datetime.now() - start_time > self.timeout):
                    self.work_count.get_nowait()
                    self.jobs.task_done()
                    del self.workers[worker]
            time.sleep(1)
            # NOTE(review): if a job is between jobs.get() and its
            # registration in self.workers when this loop runs, the monitor
            # can exit early and jobs.join() may hang -- confirm acceptable.

    def start(self):
        """Launch the monitor thread and feed all queued jobs to daemon
        worker threads, blocking when num_workers are already running."""
        monitor_thread = Thread(target = self.monitor)
        monitor_thread.daemon = True
        monitor_thread.start()
        while not self.jobs.empty():
            job = self.jobs.get()
            work_thread = Thread(target = job)
            work_thread.daemon = True
            # blocks while num_workers threads are already in flight
            self.work_count.put(True)
            self.workers[work_thread] = datetime.now()
            work_thread.start()
if __name__ == '__main__':
    # manual demo: run 10 jobs that sleep 1-10s with a 2s timeout, so some
    # jobs are deliberately reaped by the timeout path
    def make_job(n):
        import random, time
        def job():
            print 'starting %s' % n
            time.sleep(random.randint(1, 10))
            print 'ending %s' % n
        return job

    jobs = [make_job(n) for n in xrange(10)]

    wq = WorkQueue(jobs, timeout = 2)
    wq.start()
    # join returns once the monitor has marked every job done (finished
    # or timed out), not necessarily once every sleep has completed
    wq.jobs.join()
    print 'DONE'

View File

@@ -54,8 +54,7 @@ class Wrapped(object):
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__,
self.lookups, self.context)
return '<%s %s>' % (self.__class__.__name__, self.lookups)
def template(self, style = 'html'):

View File

@@ -58,6 +58,7 @@ class Account(Thing):
spammer = 0,
sort_options = {},
has_subscribed = False,
pref_media = 'off',
)
def karma(self, kind, sr = None):

View File

@@ -45,6 +45,8 @@ class Link(Thing, Printable):
reported = 0, num_comments = 0,
moderator_banned = False,
banned_before_moderator = False,
media_object = None,
has_thumbnail = False,
ip = '0.0.0.0')
def __init__(self, *a, **kw):
@@ -201,6 +203,7 @@ class Link(Thing, Printable):
wrapped.show_spam,
wrapped.show_reports,
wrapped.can_ban,
wrapped.thumbnail,
wrapped.moderator_banned))
s = ''.join(s)
return s
@@ -216,6 +219,8 @@ class Link(Thing, Printable):
@classmethod
def add_props(cls, user, wrapped):
from r2.lib.count import incr_counts
from r2.lib.media import thumbnail_url
saved = Link._saved(user, wrapped) if user else {}
hidden = Link._hidden(user, wrapped) if user else {}
#clicked = Link._clicked(user, wrapped) if user else {}
@@ -223,6 +228,17 @@ class Link(Thing, Printable):
for item in wrapped:
show_media = (c.user.pref_media == 'on' or
(c.user.pref_media == 'subreddit' and
item.subreddit.show_media))
if not show_media:
item.thumbnail = ""
elif item.has_thumbnail:
item.thumbnail = thumbnail_url(item)
else:
item.thumbnail = g.default_thumb
item.score = max(0, item.score)
item.domain = (domain(item.url) if not item.is_self

View File

@@ -43,6 +43,7 @@ class Subreddit(Thing, Printable):
ad_file = os.path.join(g.static_path, 'ad_default.html'),
reported = 0,
valid_votes = 0,
show_media = False,
)
@classmethod

View File

@@ -131,9 +131,11 @@ Thing.prototype = {
compute_height:function() {
var arrows = this.$("arrows");
var entry = this.$("entry");
var thumb = this.$("thumbnail");
var num = this.$("num");
return Math.max(arrows ? arrows.offsetHeight : 0,
entry ? entry.offsetHeight : 0,
thumb ? thumb.offsetHeight : 0,
num ? num.offsetHeight : 0);
},
@@ -151,7 +153,8 @@ Thing.prototype = {
set_height: function(h) {
var entry = this.$('entry');
var arrows = this.$('arrows');
var thumb = this.$('thumbnail');
var arrows = this.$('arrows');
var num = this.$('num');
if(h == "fit" ||
(this.max_height() && h >= this.max_height() *.90 )) {
@@ -190,6 +193,7 @@ Thing.prototype = {
}
entry.style.height = h;
if(arrows) { arrows.style.height = h; }
if(thumb) { thumb.style.height = h; }
if(num) {
if (h)
num.style.marginTop = 0;
@@ -457,14 +461,15 @@ function linkstatus(form) {
return _global_fetching_tag;
}
function setClick(a) {
function setClick(a, css_class) {
css_class = css_class || "title";
var id = _id(a);
if (id) {
if(logged) {
a.className = "title loggedin click";
a.className = css_class + " loggedin click";
}
else {
a.className = "title click";
a.className = css_class + " click";
}
setClickCookie(id);
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 126 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 99 B

View File

@@ -26,9 +26,14 @@ h2 a:visited { color: #369 }
h2 a:hover { text-decoration: underline }
h3 { font-size:110%; /*text-transform:uppercase;*/ }
a img { border:none }
a img { border: 0 none; }
a { text-decoration: none; color: #369; }
/*
a:active { border: 0 none;}
a:focus { -moz-outline-style: none; }
*/
div.autosize { display: table; width: 1px}
div.autosize > div { display: table-cell; }
@@ -59,8 +64,6 @@ input.txt {
/* header / menus */
/*:-moz-any-link:focus { outline: none }*/
.hover a:hover { text-decoration: underline }
.selected { font-weight: bold; }
@@ -498,6 +501,17 @@ before enabling */
.tagline a.friend {color: orangered }
.tagline a:hover { text-decoration: underline }
.watch-play {
background: transparent url(/static/reddit-button-play.gif) no-repeat scroll right center;
padding-right: 15px;
color: #336699;
}
.watch-stop {
color: red;
}
.embededmedia { margin-top: 5px }
.title { color: blue; padding: 0px; overflow: hidden; }
.title:visited { color: #551a8b }
.title.click { color: #551a8b }
@@ -1213,8 +1227,14 @@ a.star { text-decoration: none; color: #ff8b60 }
#passform.pretty-form button { padding: 0px 1px; }
.prefleft { padding: 10px; font-weight: bold; vertical-align: top}
.prefright { padding: 10px }
.preftable th {
padding: 10px;
font-weight: bold;
vertical-align: top;
text-align: left;
}
.preftable td.prefright { padding: 10px }
.preftable .spacer { margin-top: 5px; margin-bottom: 5px; }
.over18 button { margin: 0 10px 0 10px; padding: 5px}
@@ -1237,3 +1257,9 @@ a.star { text-decoration: none; color: #ff8b60 }
color: #369; font-weight: bold;}
.stats td.ri { padding-left: 20px; text-align: right}
.thumbnail {
float: left;
margin: 0px 5px;
overflow: hidden;
}

View File

@@ -222,7 +222,7 @@ function handleResponse(action) {
if(field) {
for(var i in u) {
if(typeof(u[i]) != "function" && u != 'name') {
field[i] = u[i];
field[i] = unsafe(u[i]);
}
} }});
my_iter(r.hide,
@@ -343,3 +343,23 @@ function more(a_tag, new_label, div_on, div_off) {
function new_captcha() {
redditRequest("new_captcha");
}
function view_embeded_media(id, media_link) {
var eid = "embeded_media_" + id;
var watchid = "view_embeded_media_span_watch_" + id;
var closeid = "view_embeded_media_span_close_" + id;
var watchspan = document.getElementById(watchid);
var closespan = document.getElementById(closeid);
var e = document.getElementById(eid);
if (e.style.display == "none") {
e.style.display = "block";
e.innerHTML = media_link;
watchspan.style.display = "none";
closespan.style.display = "inline";
} else {
e.style.display = "none";
watchspan.style.display = "inline";
closespan.style.display = "none";
}
}

View File

@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
## CondeNet, Inc. All Rights Reserved.
## CondeNet, Inc. All Rights Reserved."
################################################################################
<%namespace file="utils.html" import="error_field"/>

View File

@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
## CondeNet, Inc. All Rights Reserved.
## CondeNet, Inc. All Rights Reserved."
################################################################################
<%!
@@ -67,7 +67,7 @@ function update_title() {
%if thing.site:
<input type="hidden" name="sr" value="${thing.site._fullname}"/>
%endif
<table>
<table class="content preftable">
<tr>
<th>
<label for="name">${_("name")}</label>
@@ -220,7 +220,7 @@ function update_title() {
<label>${_("type")}</label>
</th>
<td colspan="2">
<table>
<table class="spacer">
${radio_type(_("public"), _("anyone can view and submit"))}
${radio_type(_("restricted"), _("anyone can view, but only contributors can submit links"))}
${radio_type(_("private"), _("only contributors can view and submit"))}
@@ -237,6 +237,17 @@ function update_title() {
<label for="over_18">${_("viewers must be over eighteen years old")}</label>
</td>
</tr>
<tr>
<th><label>${_("media")}</label></th>
<td colspan="2">
<input class="nomargin" type="checkbox"
name="show_media" id="show_media"
${thing.site and thing.site.show_media and "checked='checked'" or ""}/>
<label for="show_media">
${_("show thumbnail images of content")}
</label>
</td>
</tr>
<tr>
<th>

View File

@@ -1,4 +1,4 @@
## "The contents of this file are subject to the Common Public Attribution
## The contents of this file are subject to the Common Public Attribution
## License Version 1.0. (the "License"); you may not use this file except in
## compliance with the License. You may obtain a copy of the License at
## http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
@@ -20,7 +20,9 @@
## CondeNet, Inc. All Rights Reserved.
################################################################################
<%! from r2.models.subreddit import Default %>
<%!
from r2.models.subreddit import Default
%>
<%inherit file="printable.html"/>
@@ -39,12 +41,10 @@
</span>
</%def>
<%def name="entry()">
<% fullname = thing._fullname %>
<p class="title" id="titlerow_${fullname}">
<a id="title_${fullname}"
onmousedown="setClick(this)"
class="title ${ c.user_is_loggedin and 'loggedin' or ''} ${thing.clicked and 'click' or ''}"
<%def name="make_link(name, css_class)">
<a id="${name}_${thing._fullname}"
onmousedown="setClick(this, '${css_class}')"
class="${css_class} ${ c.user_is_loggedin and 'loggedin' or ''} ${thing.clicked and 'click' or ''}"
%if c.user.pref_frame:
href="/goto?id=${thing._id36}"
%else:
@@ -57,18 +57,26 @@
target="_blank"
%endif
>
${thing.title}
</a>
${caller.body()}
</a>
</%def>
<%def name="entry()">
<p class="title" id="titlerow_${thing._fullname}">
<%call expr="make_link('title', 'title')">
${thing.title}
</%call>
&#32;
${unsafe(self.domain())}
</p>
<p class="tagline">
${self.tagline()}
</p>
<ul class="flat-list buttons">
${self.buttons()}
${self.admintagline()}
</ul>
</p>
<p class="tagline">
${self.tagline()}
</p>
<ul class="flat-list buttons">
${self.buttons()}
${self.admintagline()}
</ul>
${self.mediadiv()}
</%def>
<%def name="subreddit()" buffered="True">
@@ -92,6 +100,7 @@
%endif
${self.arrow(thing, 0, thing.likes == False)}
</div>
${self.thumbnail()}
</%def>
@@ -161,6 +170,43 @@
%endif
${parent.delete_or_report_buttons()}
${parent.buttons()}
${self.media_embed()}
</%def>
<%def name="media_embed()">
%if thing.media_object:
<li>
<a id="view_embeded_media_a_${thing._fullname}" class="" \
href="javascript:view_embeded_media('${thing._fullname}', '${thing.media_object}')">\
<span id="view_embeded_media_span_watch_${thing._fullname}"
class="watch-play"
style="display: inline">
${_("watch")}
</span>
<span id="view_embeded_media_span_close_${thing._fullname}"
class="watch-stop"
style="display: none">
${_("close")}
</span>
</a>
</li>
%endif
</%def>
<%def name="thumbnail()">
%if thing.thumbnail:
<%call expr="make_link('thumbnail', 'thumbnail')">
<img src="${thing.thumbnail}" alt="thumbnail for ${thing._fullname}"/>
</%call>
%endif
</%def>
<%def name="mediadiv()">
%if thing.media_object:
<div id="embeded_media_${thing._fullname}"
class="embededmedia" style="display: none;">
<p class="error">loading...</p>
</div>
%endif
</%def>

View File

@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
## CondeNet, Inc. All Rights Reserved.
## CondeNet, Inc. All Rights Reserved."
################################################################################
<%!

View File

@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
## CondeNet, Inc. All Rights Reserved.
## CondeNet, Inc. All Rights Reserved."
################################################################################
<%namespace file="help.html" import="help_or_hide"/>

View File

@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
## CondeNet, Inc. All Rights Reserved.
## CondeNet, Inc. All Rights Reserved."
################################################################################
<%namespace file="utils.html" import="language_tool, language_checkboxes"/>
@@ -34,11 +34,20 @@
<%def name="link_options()">
<select name="numsites" style="margin: 0 .5em 0 .5em">
%for x in [10, 25, 50, 100]:
<option ${x == c.user.pref_numsites and "selected='selected'" or ""}>${x}</option>
<option ${x == c.user.pref_numsites and "selected='selected'" or ""}>
${x}
</option>
%endfor
</select>
</%def>
<%def name="media_radio(val, label)">
  ## One radio button in the "media" preference group on the prefs page.
  ## val   -- the stored pref value this button represents
  ##          (callers pass "on", "off", or "subreddit")
  ## label -- translated display text for the adjacent <label>
  ## The button is pre-checked when val matches the user's current
  ## pref_media setting.
  <input id="media_${val}" class="nomargin"
         type="radio" value="${val}" name="media"
         ${"checked='checked'" if c.user.pref_media == val else ''} />
  <label for="media_${val}">${label}</label>
  <br/>
</%def>
<%def name="num_input(s, name)">
<input type="text" size="4" maxlength="4"
@@ -54,8 +63,8 @@
<input type="hidden" name="uh" value="${c.modhash}" />
<table class="content preftable">
<tr class="prefrow">
<td class="prefleft">${_("interface language")}</td>
<tr>
<th>${_("interface language")}</th>
<td class="prefright">
${language_tool(allow_blank = False, show_regions = True,
default_lang = c.user.pref_lang)}
@@ -63,23 +72,36 @@
&#32;<a href="/feedback">${_("volunteer to translate")}</a></span>
</td>
</tr>
<tr class="prefrow">
<td class="prefleft">${_("content language")}</td>
<tr>
<th>${_("content language")}</th>
<td class="prefright">
${language_checkboxes(default = c.user.pref_content_langs)}
</td>
</tr>
%if c.user_is_loggedin:
<tr class="prefrow">
<td class="prefleft">${_("clicking options")}</td>
<tr>
<th>${_("clicking options")}</th>
<td class="prefright">
${checkbox(_("display links with a reddit toolbar"), "frame")}
<br/>
${checkbox(_("open links in a new window"), "newwindow")}
</td>
</tr>
<tr class="prefrow">
<td class="prefleft">${_("link options")}</td>
<tr>
<th>${_("media")}</th>
<td class="prefright">
%if not c.user.pref_compress:
${media_radio("on", _("show thumbnails next to links"))}
${media_radio("off", _("don't show thumbnails next to links"))}
${media_radio("subreddit", _("show thumbnails based on that reddit's media preferences"))}
%else:
<p class="error">${_("to enable thumbnails, disable compressed link display")}</p>
<input type="hidden" name="media" value="${c.user.pref_media}"/>
%endif
</td>
</tr>
<tr>
<th>${_("link options")}</th>
<td class="prefright">
${checkbox(_("show me new links on the front page"), "organic")}
<br/>
@@ -108,8 +130,8 @@
&#32;<span class="little gray">${_("(blank for none)")}</span>
</td>
</tr>
<tr class="prefrow">
<td class="prefleft">${_("comment options")}</td>
<tr>
<th>${_("comment options")}</th>
<td class="prefright">
<%
input = capture(num_input, c.user.pref_min_comment_score,
@@ -129,8 +151,8 @@
</td>
</tr>
<tr class="prefrow">
<td class="prefleft">${_("privacy options")}</td>
<tr>
<th>${_("privacy options")}</th>
<td class="prefright">
${checkbox(_("make my votes public"), "public_votes")}
<br/>

View File

@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
## CondeNet, Inc. All Rights Reserved.
## CondeNet, Inc. All Rights Reserved."
################################################################################
<%!
@@ -173,8 +173,6 @@ ${self.RenderPrintable()}
</div>
</%def>
<%def name="score(this, likes=None, inline=True, label = True, _id = True)">
<%
tag = "span" if inline else "div"
@@ -254,6 +252,12 @@ ${state_button(name, fullname, title, onclick, executed,
${title}</a>
</%def>
<%def name="advanced_button(fullname, args, title, nameFunc=None)">
  ## Anchor that invokes the JS function named by nameFunc, passing `args`
  ## through verbatim into the javascript: href (so multiple or non-fullname
  ## arguments are possible).  Element id is "<nameFunc>_a_<fullname>".
  ## NOTE(review): args is interpolated unquoted — callers are expected to
  ## supply already-formatted JS arguments; confirm at call sites.
  <a id="${nameFunc}_a_${fullname}" class="" \
     href="javascript:${nameFunc}(${args})">\
     ${title}</a>
</%def>
<%def name="tags(**kw)">
%for k, v in kw.iteritems():
%if v is not None:

View File

@@ -1,4 +1,4 @@
## "The contents of this file are subject to the Common Public Attribution
## "The contents of this file are subject to the Common Public Attribution.
## License Version 1.0. (the "License"); you may not use this file except in
## compliance with the License. You may obtain a copy of the License at
## http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
@@ -17,7 +17,7 @@
## the Original Code is CondeNet, Inc.
##
## All portions of the code written by CondeNet are Copyright (c) 2006-2008
## CondeNet, Inc. All Rights Reserved.
## CondeNet, Inc. All Rights Reserved."
################################################################################
<%!